prompt: string (length 162 to 4.26M)
response: string (length 109 to 5.16M)
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_337( // @[SynchronizerReg.scala:68:19]
  input  clock, // @[SynchronizerReg.scala:68:19]
  input  reset, // @[SynchronizerReg.scala:68:19]
  input  io_d, // @[ShiftReg.scala:36:14]
  output io_q // @[ShiftReg.scala:36:14]
);

  wire io_d_0 = io_d; // @[SynchronizerReg.scala:68:19]
  wire _sync_2_T = io_d_0; // @[SynchronizerReg.scala:54:22, :68:19]
  wire io_q_0; // @[SynchronizerReg.scala:68:19]
  reg  sync_0; // @[SynchronizerReg.scala:51:87]
  assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19]
  reg  sync_1; // @[SynchronizerReg.scala:51:87]
  reg  sync_2; // @[SynchronizerReg.scala:51:87]
  always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19]
    if (reset) begin // @[SynchronizerReg.scala:68:19]
      sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87]
      sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87]
      sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87]
    end
    else begin // @[SynchronizerReg.scala:68:19]
      sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87]
      sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87]
      sync_2 <= _sync_2_T; // @[SynchronizerReg.scala:51:87, :54:22]
    end
  end // always @(posedge, posedge)
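Aside (editorial, not part of the dataset record): the AsyncResetSynchronizerShiftReg companion object defined in the prompt above is the usual entry point for this primitive. A minimal sketch of how it might be instantiated from user code follows; the wrapper module and port names are invented for illustration, and only the apply signature comes from the prompt.

```scala
// Illustrative sketch, not part of the dataset: synchronize an asynchronous level
// signal into the local clock domain with a 3-deep chain, using the helper whose
// source appears in the prompt above. Module and signal names are hypothetical.
import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

class LevelSyncExample extends Module {
  val io = IO(new Bundle {
    val async_in = Input(Bool())   // driven from another clock domain
    val sync_out = Output(Bool())  // safe to sample in this clock domain
  })
  // sync = depth of the flop chain, init = value taken on asynchronous reset,
  // name = suggested instance name that backend CDC flows can match on.
  io.sync_out := AsyncResetSynchronizerShiftReg(io.async_in, sync = 3, init = 0,
                                                name = Some("level_sync"))
}
```

Per input bit, this elaborates one SynchronizerPrimitiveShiftReg under an AsyncReset, which is the kind of module (AsyncResetSynchronizerPrimitiveShiftReg_d3_i0) shown in the generated Verilog above.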
Generate the Verilog code corresponding to the following Chisel files. File regfile.scala: //****************************************************************************** // Copyright (c) 2013 - 2018, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // Register File (Abstract class and Synthesizable RegFile) //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ package boom.v4.exu import scala.collection.mutable.ArrayBuffer import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.Parameters import boom.v4.common._ import boom.v4.util._ abstract class RegisterFile[T <: Data]( dType: T, numRegisters: Int, numReadPorts: Int, numWritePorts: Int) (implicit p: Parameters) extends BoomModule { val io = IO(new BoomBundle { val arb_read_reqs = Vec(numReadPorts, Flipped(Decoupled(UInt(log2Ceil(numRegisters).W)))) val rrd_read_resps = Vec(numReadPorts, Output(dType)) val write_ports = Vec(numWritePorts, Flipped(Valid(new Bundle { val addr = Output(UInt(maxPregSz.W)) val data = Output(dType) }))) }) // ensure there is only 1 writer per register (unless to preg0) if (numWritePorts > 1) { for (i <- 0 until (numWritePorts - 1)) { for (j <- (i + 1) until numWritePorts) { assert(!io.write_ports(i).valid || !io.write_ports(j).valid || (io.write_ports(i).bits.addr =/= io.write_ports(j).bits.addr), "[regfile] too many writers a register") } } } } class BankedRF[T <: Data]( dType: T, numBanks: Int, numLogicalReadPortsPerBank: Int, numRegisters: Int, numLogicalReadPorts: Int, numPhysicalReadPorts: Int, numWritePorts: Int, bankedWritePortArray: Seq[Option[Int]], typeStr: String )(implicit p: Parameters) extends RegisterFile(dType, numRegisters, numLogicalReadPorts, numWritePorts) { require(isPow2(numBanks)) require(numRegisters % numBanks == 0) require(bankedWritePortArray.length == numWritePorts) val numDedicatedWritePorts = bankedWritePortArray.flatten.length val writePortsPerBank = if (numDedicatedWritePorts == 0) { numWritePorts } else { numWritePorts - numDedicatedWritePorts + 1 } def bankIdx(i: UInt): UInt = i(log2Ceil(numBanks)-1,0) val rfs = (0 until numBanks) map { w => Module(new PartiallyPortedRF( dType, numRegisters / numBanks, numLogicalReadPortsPerBank, numPhysicalReadPorts, writePortsPerBank, typeStr + s" Bank ${w}" )) } if (numBanks == 1) { require(numLogicalReadPortsPerBank == numLogicalReadPorts) io <> rfs(0).io } else { val widxs = Array.fill(numBanks)(0) for (i <- 0 until numWritePorts) { if (bankedWritePortArray(i) != None) { val bank = bankedWritePortArray(i).get val widx = widxs(bank) rfs(bank).io.write_ports(widx).valid := io.write_ports(i).valid rfs(bank).io.write_ports(widx).bits.addr := io.write_ports(i).bits.addr >> log2Ceil(numBanks) rfs(bank).io.write_ports(widx).bits.data := io.write_ports(i).bits.data assert(!io.write_ports(i).valid || bankIdx(io.write_ports(i).bits.addr) === bank.U) widxs(bank) = widx + 1 } else { for (w <- 0 until numBanks) { val widx = widxs(w) rfs(w).io.write_ports(widx).valid := io.write_ports(i).valid && bankIdx(io.write_ports(i).bits.addr) === w.U rfs(w).io.write_ports(widx).bits.addr := 
io.write_ports(i).bits.addr >> log2Ceil(numBanks) rfs(w).io.write_ports(widx).bits.data := io.write_ports(i).bits.data widxs(w) = widx + 1 } } } require(widxs.forall(_ == writePortsPerBank), widxs.mkString(",")) if (numLogicalReadPortsPerBank == numLogicalReadPorts) { for (i <- 0 until numLogicalReadPorts) { val bidx = bankIdx(io.arb_read_reqs(i).bits) for (w <- 0 until numBanks) { rfs(w).io.arb_read_reqs(i).valid := io.arb_read_reqs(i).valid && bankIdx(io.arb_read_reqs(i).bits) === w.U rfs(w).io.arb_read_reqs(i).bits := io.arb_read_reqs(i).bits >> log2Ceil(numBanks) } val arb_data_sel = UIntToOH(bidx) val rrd_data_sel = RegNext(arb_data_sel) io.arb_read_reqs(i).ready := Mux1H(arb_data_sel, rfs.map(_.io.arb_read_reqs(i).ready)) io.rrd_read_resps(i) := Mux1H(rrd_data_sel, rfs.map(_.io.rrd_read_resps(i))) } } } override def toString: String = rfs.map(_.toString).mkString } class PartiallyPortedRF[T <: Data]( dType: T, numRegisters: Int, numLogicalReadPorts: Int, numPhysicalReadPorts: Int, numWritePorts: Int, typeStr: String )(implicit p: Parameters) extends RegisterFile(dType, numRegisters, numLogicalReadPorts, numWritePorts) { val rf = Module(new FullyPortedRF( dType = dType, numRegisters = numRegisters, numReadPorts = numPhysicalReadPorts, numWritePorts = numWritePorts, typeStr = "Partially Ported " + typeStr, )) rf.io.write_ports := io.write_ports val port_issued = Array.fill(numPhysicalReadPorts) { false.B } val port_addrs = Array.fill(numPhysicalReadPorts) { 0.U(log2Ceil(numRegisters).W) } val data_sels = Wire(Vec(numLogicalReadPorts , UInt(numPhysicalReadPorts.W))) data_sels := DontCare for (i <- 0 until numLogicalReadPorts) { var read_issued = false.B for (j <- 0 until numPhysicalReadPorts) { val issue_read = WireInit(false.B) val use_port = WireInit(false.B) when (!read_issued && !port_issued(j) && io.arb_read_reqs(i).valid) { issue_read := true.B use_port := true.B data_sels(i) := UIntToOH(j.U) } val was_port_issued_yet = port_issued(j) port_issued(j) = use_port || port_issued(j) port_addrs(j) = port_addrs(j) | Mux(was_port_issued_yet || !use_port, 0.U, io.arb_read_reqs(i).bits) read_issued = issue_read || read_issued } io.arb_read_reqs(i).ready := PopCount(io.arb_read_reqs.take(i).map(_.valid)) < numPhysicalReadPorts.U assert(!(io.arb_read_reqs(i).fire && !read_issued)) } for (j <- 0 until numPhysicalReadPorts) { rf.io.arb_read_reqs(j).valid := port_issued(j) rf.io.arb_read_reqs(j).bits := port_addrs(j) assert(rf.io.arb_read_reqs(j).ready) } val rrd_data_sels = RegNext(data_sels) for (i <- 0 until numLogicalReadPorts) { io.rrd_read_resps(i) := Mux1H(rrd_data_sels(i).asBools, rf.io.rrd_read_resps) } override def toString: String = rf.toString } class FullyPortedRF[T <: Data]( dType: T, numRegisters: Int, numReadPorts: Int, numWritePorts: Int, typeStr: String, )(implicit p: Parameters) extends RegisterFile(dType, numRegisters, numReadPorts, numWritePorts) { val rf_cost = (numReadPorts + numWritePorts) * (numReadPorts + 2*numWritePorts) override def toString: String = BoomCoreStringPrefix( "==" + typeStr + " Regfile==", "Num RF Read Ports : " + numReadPorts, "Num RF Write Ports : " + numWritePorts, "RF Cost (R+W)*(R+2W) : " + rf_cost) io.arb_read_reqs.map(p => p.ready := true.B) val regfile = Mem(numRegisters, dType) (0 until numReadPorts) map {p => io.rrd_read_resps(p) := regfile(RegNext(io.arb_read_reqs(p).bits)) } io.write_ports map { p => when (p.valid) { regfile(p.bits.addr) := p.bits.data }} }
module PartiallyPortedRF_1( // @[regfile.scala:128:7] input clock, // @[regfile.scala:128:7] input reset, // @[regfile.scala:128:7] input io_arb_read_reqs_0_valid, // @[regfile.scala:31:14] input [5:0] io_arb_read_reqs_0_bits, // @[regfile.scala:31:14] input io_arb_read_reqs_1_valid, // @[regfile.scala:31:14] input [5:0] io_arb_read_reqs_1_bits, // @[regfile.scala:31:14] input io_arb_read_reqs_2_valid, // @[regfile.scala:31:14] input [5:0] io_arb_read_reqs_2_bits, // @[regfile.scala:31:14] input io_arb_read_reqs_3_valid, // @[regfile.scala:31:14] input [5:0] io_arb_read_reqs_3_bits, // @[regfile.scala:31:14] output io_arb_read_reqs_4_ready, // @[regfile.scala:31:14] input io_arb_read_reqs_4_valid, // @[regfile.scala:31:14] input [5:0] io_arb_read_reqs_4_bits, // @[regfile.scala:31:14] output io_arb_read_reqs_5_ready, // @[regfile.scala:31:14] input io_arb_read_reqs_5_valid, // @[regfile.scala:31:14] input [5:0] io_arb_read_reqs_5_bits, // @[regfile.scala:31:14] output io_arb_read_reqs_6_ready, // @[regfile.scala:31:14] input io_arb_read_reqs_6_valid, // @[regfile.scala:31:14] input [5:0] io_arb_read_reqs_6_bits, // @[regfile.scala:31:14] output io_arb_read_reqs_7_ready, // @[regfile.scala:31:14] input io_arb_read_reqs_7_valid, // @[regfile.scala:31:14] input [5:0] io_arb_read_reqs_7_bits, // @[regfile.scala:31:14] output io_arb_read_reqs_8_ready, // @[regfile.scala:31:14] input io_arb_read_reqs_8_valid, // @[regfile.scala:31:14] input [5:0] io_arb_read_reqs_8_bits, // @[regfile.scala:31:14] output io_arb_read_reqs_9_ready, // @[regfile.scala:31:14] input io_arb_read_reqs_9_valid, // @[regfile.scala:31:14] input [5:0] io_arb_read_reqs_9_bits, // @[regfile.scala:31:14] output [63:0] io_rrd_read_resps_0, // @[regfile.scala:31:14] output [63:0] io_rrd_read_resps_1, // @[regfile.scala:31:14] output [63:0] io_rrd_read_resps_2, // @[regfile.scala:31:14] output [63:0] io_rrd_read_resps_3, // @[regfile.scala:31:14] output [63:0] io_rrd_read_resps_4, // @[regfile.scala:31:14] output [63:0] io_rrd_read_resps_5, // @[regfile.scala:31:14] output [63:0] io_rrd_read_resps_6, // @[regfile.scala:31:14] output [63:0] io_rrd_read_resps_7, // @[regfile.scala:31:14] output [63:0] io_rrd_read_resps_8, // @[regfile.scala:31:14] output [63:0] io_rrd_read_resps_9, // @[regfile.scala:31:14] input io_write_ports_0_valid, // @[regfile.scala:31:14] input [6:0] io_write_ports_0_bits_addr, // @[regfile.scala:31:14] input [63:0] io_write_ports_0_bits_data, // @[regfile.scala:31:14] input io_write_ports_1_valid, // @[regfile.scala:31:14] input [6:0] io_write_ports_1_bits_addr, // @[regfile.scala:31:14] input [63:0] io_write_ports_1_bits_data, // @[regfile.scala:31:14] input io_write_ports_2_valid, // @[regfile.scala:31:14] input [6:0] io_write_ports_2_bits_addr, // @[regfile.scala:31:14] input [63:0] io_write_ports_2_bits_data, // @[regfile.scala:31:14] input io_write_ports_3_valid, // @[regfile.scala:31:14] input [6:0] io_write_ports_3_bits_addr, // @[regfile.scala:31:14] input [63:0] io_write_ports_3_bits_data, // @[regfile.scala:31:14] input io_write_ports_4_valid, // @[regfile.scala:31:14] input [6:0] io_write_ports_4_bits_addr, // @[regfile.scala:31:14] input [63:0] io_write_ports_4_bits_data // @[regfile.scala:31:14] ); wire [63:0] _rf_io_rrd_read_resps_0; // @[regfile.scala:138:18] wire [63:0] _rf_io_rrd_read_resps_1; // @[regfile.scala:138:18] wire [63:0] _rf_io_rrd_read_resps_2; // @[regfile.scala:138:18] wire [63:0] _rf_io_rrd_read_resps_3; // @[regfile.scala:138:18] wire [63:0] _rf_io_rrd_read_resps_4; // 
@[regfile.scala:138:18] wire [63:0] _rf_io_rrd_read_resps_5; // @[regfile.scala:138:18] wire io_arb_read_reqs_0_valid_0 = io_arb_read_reqs_0_valid; // @[regfile.scala:128:7] wire [5:0] io_arb_read_reqs_0_bits_0 = io_arb_read_reqs_0_bits; // @[regfile.scala:128:7] wire io_arb_read_reqs_1_valid_0 = io_arb_read_reqs_1_valid; // @[regfile.scala:128:7] wire [5:0] io_arb_read_reqs_1_bits_0 = io_arb_read_reqs_1_bits; // @[regfile.scala:128:7] wire io_arb_read_reqs_2_valid_0 = io_arb_read_reqs_2_valid; // @[regfile.scala:128:7] wire [5:0] io_arb_read_reqs_2_bits_0 = io_arb_read_reqs_2_bits; // @[regfile.scala:128:7] wire io_arb_read_reqs_3_valid_0 = io_arb_read_reqs_3_valid; // @[regfile.scala:128:7] wire [5:0] io_arb_read_reqs_3_bits_0 = io_arb_read_reqs_3_bits; // @[regfile.scala:128:7] wire io_arb_read_reqs_4_valid_0 = io_arb_read_reqs_4_valid; // @[regfile.scala:128:7] wire [5:0] io_arb_read_reqs_4_bits_0 = io_arb_read_reqs_4_bits; // @[regfile.scala:128:7] wire io_arb_read_reqs_5_valid_0 = io_arb_read_reqs_5_valid; // @[regfile.scala:128:7] wire [5:0] io_arb_read_reqs_5_bits_0 = io_arb_read_reqs_5_bits; // @[regfile.scala:128:7] wire io_arb_read_reqs_6_valid_0 = io_arb_read_reqs_6_valid; // @[regfile.scala:128:7] wire [5:0] io_arb_read_reqs_6_bits_0 = io_arb_read_reqs_6_bits; // @[regfile.scala:128:7] wire io_arb_read_reqs_7_valid_0 = io_arb_read_reqs_7_valid; // @[regfile.scala:128:7] wire [5:0] io_arb_read_reqs_7_bits_0 = io_arb_read_reqs_7_bits; // @[regfile.scala:128:7] wire io_arb_read_reqs_8_valid_0 = io_arb_read_reqs_8_valid; // @[regfile.scala:128:7] wire [5:0] io_arb_read_reqs_8_bits_0 = io_arb_read_reqs_8_bits; // @[regfile.scala:128:7] wire io_arb_read_reqs_9_valid_0 = io_arb_read_reqs_9_valid; // @[regfile.scala:128:7] wire [5:0] io_arb_read_reqs_9_bits_0 = io_arb_read_reqs_9_bits; // @[regfile.scala:128:7] wire io_write_ports_0_valid_0 = io_write_ports_0_valid; // @[regfile.scala:128:7] wire [6:0] io_write_ports_0_bits_addr_0 = io_write_ports_0_bits_addr; // @[regfile.scala:128:7] wire [63:0] io_write_ports_0_bits_data_0 = io_write_ports_0_bits_data; // @[regfile.scala:128:7] wire io_write_ports_1_valid_0 = io_write_ports_1_valid; // @[regfile.scala:128:7] wire [6:0] io_write_ports_1_bits_addr_0 = io_write_ports_1_bits_addr; // @[regfile.scala:128:7] wire [63:0] io_write_ports_1_bits_data_0 = io_write_ports_1_bits_data; // @[regfile.scala:128:7] wire io_write_ports_2_valid_0 = io_write_ports_2_valid; // @[regfile.scala:128:7] wire [6:0] io_write_ports_2_bits_addr_0 = io_write_ports_2_bits_addr; // @[regfile.scala:128:7] wire [63:0] io_write_ports_2_bits_data_0 = io_write_ports_2_bits_data; // @[regfile.scala:128:7] wire io_write_ports_3_valid_0 = io_write_ports_3_valid; // @[regfile.scala:128:7] wire [6:0] io_write_ports_3_bits_addr_0 = io_write_ports_3_bits_addr; // @[regfile.scala:128:7] wire [63:0] io_write_ports_3_bits_data_0 = io_write_ports_3_bits_data; // @[regfile.scala:128:7] wire io_write_ports_4_valid_0 = io_write_ports_4_valid; // @[regfile.scala:128:7] wire [6:0] io_write_ports_4_bits_addr_0 = io_write_ports_4_bits_addr; // @[regfile.scala:128:7] wire [63:0] io_write_ports_4_bits_data_0 = io_write_ports_4_bits_data; // @[regfile.scala:128:7] wire io_arb_read_reqs_0_ready = 1'h1; // @[regfile.scala:128:7] wire io_arb_read_reqs_1_ready = 1'h1; // @[regfile.scala:128:7] wire io_arb_read_reqs_2_ready = 1'h1; // @[regfile.scala:128:7] wire io_arb_read_reqs_3_ready = 1'h1; // @[regfile.scala:128:7] wire _io_arb_read_reqs_0_ready_T = 1'h1; // @[regfile.scala:167:82] wire 
_io_arb_read_reqs_1_ready_T = 1'h1; // @[regfile.scala:167:82] wire _io_arb_read_reqs_2_ready_T_2 = 1'h1; // @[regfile.scala:167:82] wire _io_arb_read_reqs_3_ready_T_4 = 1'h1; // @[regfile.scala:167:82] wire [7:0] _data_sels_0_T_5 = 8'h20; // @[OneHot.scala:58:35] wire [7:0] _data_sels_1_T_5 = 8'h20; // @[OneHot.scala:58:35] wire [7:0] _data_sels_2_T_5 = 8'h20; // @[OneHot.scala:58:35] wire [7:0] _data_sels_3_T_5 = 8'h20; // @[OneHot.scala:58:35] wire [7:0] _data_sels_4_T_5 = 8'h20; // @[OneHot.scala:58:35] wire [7:0] _data_sels_5_T_5 = 8'h20; // @[OneHot.scala:58:35] wire [7:0] _data_sels_6_T_5 = 8'h20; // @[OneHot.scala:58:35] wire [7:0] _data_sels_7_T_5 = 8'h20; // @[OneHot.scala:58:35] wire [7:0] _data_sels_8_T_5 = 8'h20; // @[OneHot.scala:58:35] wire [7:0] _data_sels_9_T_5 = 8'h20; // @[OneHot.scala:58:35] wire [7:0] _data_sels_0_T_4 = 8'h10; // @[OneHot.scala:58:35] wire [7:0] _data_sels_1_T_4 = 8'h10; // @[OneHot.scala:58:35] wire [7:0] _data_sels_2_T_4 = 8'h10; // @[OneHot.scala:58:35] wire [7:0] _data_sels_3_T_4 = 8'h10; // @[OneHot.scala:58:35] wire [7:0] _data_sels_4_T_4 = 8'h10; // @[OneHot.scala:58:35] wire [7:0] _data_sels_5_T_4 = 8'h10; // @[OneHot.scala:58:35] wire [7:0] _data_sels_6_T_4 = 8'h10; // @[OneHot.scala:58:35] wire [7:0] _data_sels_7_T_4 = 8'h10; // @[OneHot.scala:58:35] wire [7:0] _data_sels_8_T_4 = 8'h10; // @[OneHot.scala:58:35] wire [7:0] _data_sels_9_T_4 = 8'h10; // @[OneHot.scala:58:35] wire [3:0] _data_sels_0_T_3 = 4'h8; // @[OneHot.scala:58:35] wire [3:0] _data_sels_1_T_3 = 4'h8; // @[OneHot.scala:58:35] wire [3:0] _data_sels_2_T_3 = 4'h8; // @[OneHot.scala:58:35] wire [3:0] _data_sels_3_T_3 = 4'h8; // @[OneHot.scala:58:35] wire [3:0] _data_sels_4_T_3 = 4'h8; // @[OneHot.scala:58:35] wire [3:0] _data_sels_5_T_3 = 4'h8; // @[OneHot.scala:58:35] wire [3:0] _data_sels_6_T_3 = 4'h8; // @[OneHot.scala:58:35] wire [3:0] _data_sels_7_T_3 = 4'h8; // @[OneHot.scala:58:35] wire [3:0] _data_sels_8_T_3 = 4'h8; // @[OneHot.scala:58:35] wire [3:0] _data_sels_9_T_3 = 4'h8; // @[OneHot.scala:58:35] wire [3:0] _data_sels_0_T_2 = 4'h4; // @[OneHot.scala:58:35] wire [3:0] _data_sels_1_T_2 = 4'h4; // @[OneHot.scala:58:35] wire [3:0] _data_sels_2_T_2 = 4'h4; // @[OneHot.scala:58:35] wire [3:0] _data_sels_3_T_2 = 4'h4; // @[OneHot.scala:58:35] wire [3:0] _data_sels_4_T_2 = 4'h4; // @[OneHot.scala:58:35] wire [3:0] _data_sels_5_T_2 = 4'h4; // @[OneHot.scala:58:35] wire [3:0] _data_sels_6_T_2 = 4'h4; // @[OneHot.scala:58:35] wire [3:0] _data_sels_7_T_2 = 4'h4; // @[OneHot.scala:58:35] wire [3:0] _data_sels_8_T_2 = 4'h4; // @[OneHot.scala:58:35] wire [3:0] _data_sels_9_T_2 = 4'h4; // @[OneHot.scala:58:35] wire [1:0] _data_sels_0_T_1 = 2'h2; // @[OneHot.scala:58:35] wire [1:0] _data_sels_1_T_1 = 2'h2; // @[OneHot.scala:58:35] wire [1:0] _data_sels_2_T_1 = 2'h2; // @[OneHot.scala:58:35] wire [1:0] _data_sels_3_T_1 = 2'h2; // @[OneHot.scala:58:35] wire [1:0] _data_sels_4_T_1 = 2'h2; // @[OneHot.scala:58:35] wire [1:0] _data_sels_5_T_1 = 2'h2; // @[OneHot.scala:58:35] wire [1:0] _data_sels_6_T_1 = 2'h2; // @[OneHot.scala:58:35] wire [1:0] _data_sels_7_T_1 = 2'h2; // @[OneHot.scala:58:35] wire [1:0] _data_sels_8_T_1 = 2'h2; // @[OneHot.scala:58:35] wire [1:0] _data_sels_9_T_1 = 2'h2; // @[OneHot.scala:58:35] wire [1:0] _data_sels_0_T = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _data_sels_1_T = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _data_sels_2_T = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _data_sels_3_T = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _data_sels_4_T = 2'h1; // 
@[OneHot.scala:58:35] wire [1:0] _data_sels_5_T = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _data_sels_6_T = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _data_sels_7_T = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _data_sels_8_T = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _data_sels_9_T = 2'h1; // @[OneHot.scala:58:35] wire issue_read = io_arb_read_reqs_0_valid_0; // @[regfile.scala:128:7, :155:32] wire use_port = io_arb_read_reqs_0_valid_0; // @[regfile.scala:128:7, :156:30] wire _io_arb_read_reqs_4_ready_T_6; // @[regfile.scala:167:82] wire _io_arb_read_reqs_5_ready_T_8; // @[regfile.scala:167:82] wire _io_arb_read_reqs_6_ready_T_10; // @[regfile.scala:167:82] wire _io_arb_read_reqs_7_ready_T_12; // @[regfile.scala:167:82] wire _io_arb_read_reqs_8_ready_T_14; // @[regfile.scala:167:82] wire _io_arb_read_reqs_9_ready_T_16; // @[regfile.scala:167:82] wire [63:0] _io_rrd_read_resps_0_WIRE; // @[Mux.scala:30:73] wire [63:0] _io_rrd_read_resps_1_WIRE; // @[Mux.scala:30:73] wire [63:0] _io_rrd_read_resps_2_WIRE; // @[Mux.scala:30:73] wire [63:0] _io_rrd_read_resps_3_WIRE; // @[Mux.scala:30:73] wire [63:0] _io_rrd_read_resps_4_WIRE; // @[Mux.scala:30:73] wire [63:0] _io_rrd_read_resps_5_WIRE; // @[Mux.scala:30:73] wire [63:0] _io_rrd_read_resps_6_WIRE; // @[Mux.scala:30:73] wire [63:0] _io_rrd_read_resps_7_WIRE; // @[Mux.scala:30:73] wire [63:0] _io_rrd_read_resps_8_WIRE; // @[Mux.scala:30:73] wire [63:0] _io_rrd_read_resps_9_WIRE; // @[Mux.scala:30:73] wire io_arb_read_reqs_4_ready_0; // @[regfile.scala:128:7] wire io_arb_read_reqs_5_ready_0; // @[regfile.scala:128:7] wire io_arb_read_reqs_6_ready_0; // @[regfile.scala:128:7] wire io_arb_read_reqs_7_ready_0; // @[regfile.scala:128:7] wire io_arb_read_reqs_8_ready_0; // @[regfile.scala:128:7] wire io_arb_read_reqs_9_ready_0; // @[regfile.scala:128:7] wire [63:0] io_rrd_read_resps_0_0; // @[regfile.scala:128:7] wire [63:0] io_rrd_read_resps_1_0; // @[regfile.scala:128:7] wire [63:0] io_rrd_read_resps_2_0; // @[regfile.scala:128:7] wire [63:0] io_rrd_read_resps_3_0; // @[regfile.scala:128:7] wire [63:0] io_rrd_read_resps_4_0; // @[regfile.scala:128:7] wire [63:0] io_rrd_read_resps_5_0; // @[regfile.scala:128:7] wire [63:0] io_rrd_read_resps_6_0; // @[regfile.scala:128:7] wire [63:0] io_rrd_read_resps_7_0; // @[regfile.scala:128:7] wire [63:0] io_rrd_read_resps_8_0; // @[regfile.scala:128:7] wire [63:0] io_rrd_read_resps_9_0; // @[regfile.scala:128:7] wire [5:0] data_sels_0; // @[regfile.scala:149:25] wire [5:0] data_sels_1; // @[regfile.scala:149:25] wire [5:0] data_sels_2; // @[regfile.scala:149:25] wire [5:0] data_sels_3; // @[regfile.scala:149:25] wire [5:0] data_sels_4; // @[regfile.scala:149:25] wire [5:0] data_sels_5; // @[regfile.scala:149:25] wire [5:0] data_sels_6; // @[regfile.scala:149:25] wire [5:0] data_sels_7; // @[regfile.scala:149:25] wire [5:0] data_sels_8; // @[regfile.scala:149:25] wire [5:0] data_sels_9; // @[regfile.scala:149:25] wire issue_read_1; // @[regfile.scala:155:32] wire use_port_1; // @[regfile.scala:156:30] wire _T_93 = ~issue_read & io_arb_read_reqs_0_valid_0; // @[regfile.scala:128:7, :155:32, :157:{13,45}] assign issue_read_1 = _T_93; // @[regfile.scala:155:32, :157:45] assign use_port_1 = _T_93; // @[regfile.scala:156:30, :157:45] wire _T_99 = issue_read_1 | issue_read; // @[regfile.scala:155:32, :165:32] wire issue_read_2; // @[regfile.scala:155:32] wire use_port_2; // @[regfile.scala:156:30] wire _T_103 = ~_T_99 & io_arb_read_reqs_0_valid_0; // @[regfile.scala:128:7, :157:{13,45}, :165:32] assign issue_read_2 = 
_T_103; // @[regfile.scala:155:32, :157:45] assign use_port_2 = _T_103; // @[regfile.scala:156:30, :157:45] wire _T_109 = issue_read_2 | _T_99; // @[regfile.scala:155:32, :165:32] wire issue_read_3; // @[regfile.scala:155:32] wire use_port_3; // @[regfile.scala:156:30] wire _T_113 = ~_T_109 & io_arb_read_reqs_0_valid_0; // @[regfile.scala:128:7, :157:{13,45}, :165:32] assign issue_read_3 = _T_113; // @[regfile.scala:155:32, :157:45] assign use_port_3 = _T_113; // @[regfile.scala:156:30, :157:45] wire _T_119 = issue_read_3 | _T_109; // @[regfile.scala:155:32, :165:32] wire issue_read_4; // @[regfile.scala:155:32] wire use_port_4; // @[regfile.scala:156:30] wire _T_123 = ~_T_119 & io_arb_read_reqs_0_valid_0; // @[regfile.scala:128:7, :157:{13,45}, :165:32] assign issue_read_4 = _T_123; // @[regfile.scala:155:32, :157:45] assign use_port_4 = _T_123; // @[regfile.scala:156:30, :157:45] wire _T_129 = issue_read_4 | _T_119; // @[regfile.scala:155:32, :165:32] wire issue_read_5; // @[regfile.scala:155:32] wire use_port_5; // @[regfile.scala:156:30] wire _T_133 = ~_T_129 & io_arb_read_reqs_0_valid_0; // @[regfile.scala:128:7, :157:{13,45}, :165:32] assign issue_read_5 = _T_133; // @[regfile.scala:155:32, :157:45] assign use_port_5 = _T_133; // @[regfile.scala:156:30, :157:45] assign data_sels_0 = _T_133 ? 6'h20 : _T_123 ? 6'h10 : _T_113 ? 6'h8 : _T_103 ? 6'h4 : _T_93 ? 6'h2 : 6'h1; // @[regfile.scala:149:25, :157:{45,75}, :160:22] wire issue_read_6; // @[regfile.scala:155:32] wire use_port_6; // @[regfile.scala:156:30] wire _T_150 = ~use_port & io_arb_read_reqs_1_valid_0; // @[regfile.scala:128:7, :156:30, :157:{29,45}] assign issue_read_6 = _T_150; // @[regfile.scala:155:32, :157:45] assign use_port_6 = _T_150; // @[regfile.scala:156:30, :157:45] wire _T_151 = use_port_6 | use_port; // @[regfile.scala:156:30, :163:33] wire issue_read_7; // @[regfile.scala:155:32] wire use_port_7; // @[regfile.scala:156:30] wire _T_160 = ~issue_read_6 & ~use_port_1 & io_arb_read_reqs_1_valid_0; // @[regfile.scala:128:7, :155:32, :156:30, :157:{13,26,29,45}] assign issue_read_7 = _T_160; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_7 = _T_160; // @[regfile.scala:156:30, :157:{26,45}] wire _T_161 = use_port_7 | use_port_1; // @[regfile.scala:156:30, :163:33] wire _T_166 = issue_read_7 | issue_read_6; // @[regfile.scala:155:32, :165:32] wire issue_read_8; // @[regfile.scala:155:32] wire use_port_8; // @[regfile.scala:156:30] wire _T_170 = ~_T_166 & ~use_port_2 & io_arb_read_reqs_1_valid_0; // @[regfile.scala:128:7, :156:30, :157:{13,26,29,45}, :165:32] assign issue_read_8 = _T_170; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_8 = _T_170; // @[regfile.scala:156:30, :157:{26,45}] wire _T_171 = use_port_8 | use_port_2; // @[regfile.scala:156:30, :163:33] wire _T_176 = issue_read_8 | _T_166; // @[regfile.scala:155:32, :165:32] wire issue_read_9; // @[regfile.scala:155:32] wire use_port_9; // @[regfile.scala:156:30] wire _T_180 = ~_T_176 & ~use_port_3 & io_arb_read_reqs_1_valid_0; // @[regfile.scala:128:7, :156:30, :157:{13,26,29,45}, :165:32] assign issue_read_9 = _T_180; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_9 = _T_180; // @[regfile.scala:156:30, :157:{26,45}] wire _T_181 = use_port_9 | use_port_3; // @[regfile.scala:156:30, :163:33] wire _T_186 = issue_read_9 | _T_176; // @[regfile.scala:155:32, :165:32] wire issue_read_10; // @[regfile.scala:155:32] wire use_port_10; // @[regfile.scala:156:30] wire _T_190 = ~_T_186 & ~use_port_4 & io_arb_read_reqs_1_valid_0; // 
@[regfile.scala:128:7, :156:30, :157:{13,26,29,45}, :165:32] assign issue_read_10 = _T_190; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_10 = _T_190; // @[regfile.scala:156:30, :157:{26,45}] wire _T_191 = use_port_10 | use_port_4; // @[regfile.scala:156:30, :163:33] wire _T_196 = issue_read_10 | _T_186; // @[regfile.scala:155:32, :165:32] wire issue_read_11; // @[regfile.scala:155:32] wire use_port_11; // @[regfile.scala:156:30] wire _T_200 = ~_T_196 & ~use_port_5 & io_arb_read_reqs_1_valid_0; // @[regfile.scala:128:7, :156:30, :157:{13,26,29,45}, :165:32] assign issue_read_11 = _T_200; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_11 = _T_200; // @[regfile.scala:156:30, :157:{26,45}] assign data_sels_1 = _T_200 ? 6'h20 : _T_190 ? 6'h10 : _T_180 ? 6'h8 : _T_170 ? 6'h4 : _T_160 ? 6'h2 : 6'h1; // @[regfile.scala:149:25, :157:{26,45,75}, :160:22] wire _T_201 = use_port_11 | use_port_5; // @[regfile.scala:156:30, :163:33] wire issue_read_12; // @[regfile.scala:155:32] wire use_port_12; // @[regfile.scala:156:30] wire _T_217 = ~_T_151 & io_arb_read_reqs_2_valid_0; // @[regfile.scala:128:7, :157:{29,45}, :163:33] assign issue_read_12 = _T_217; // @[regfile.scala:155:32, :157:45] assign use_port_12 = _T_217; // @[regfile.scala:156:30, :157:45] wire _T_218 = use_port_12 | _T_151; // @[regfile.scala:156:30, :163:33] wire issue_read_13; // @[regfile.scala:155:32] wire use_port_13; // @[regfile.scala:156:30] wire _T_227 = ~issue_read_12 & ~_T_161 & io_arb_read_reqs_2_valid_0; // @[regfile.scala:128:7, :155:32, :157:{13,26,29,45}, :163:33] assign issue_read_13 = _T_227; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_13 = _T_227; // @[regfile.scala:156:30, :157:{26,45}] wire _T_228 = use_port_13 | _T_161; // @[regfile.scala:156:30, :163:33] wire _T_233 = issue_read_13 | issue_read_12; // @[regfile.scala:155:32, :165:32] wire issue_read_14; // @[regfile.scala:155:32] wire use_port_14; // @[regfile.scala:156:30] wire _T_237 = ~_T_233 & ~_T_171 & io_arb_read_reqs_2_valid_0; // @[regfile.scala:128:7, :157:{13,26,29,45}, :163:33, :165:32] assign issue_read_14 = _T_237; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_14 = _T_237; // @[regfile.scala:156:30, :157:{26,45}] wire _T_238 = use_port_14 | _T_171; // @[regfile.scala:156:30, :163:33] wire _T_243 = issue_read_14 | _T_233; // @[regfile.scala:155:32, :165:32] wire issue_read_15; // @[regfile.scala:155:32] wire use_port_15; // @[regfile.scala:156:30] wire _T_247 = ~_T_243 & ~_T_181 & io_arb_read_reqs_2_valid_0; // @[regfile.scala:128:7, :157:{13,26,29,45}, :163:33, :165:32] assign issue_read_15 = _T_247; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_15 = _T_247; // @[regfile.scala:156:30, :157:{26,45}] wire _T_248 = use_port_15 | _T_181; // @[regfile.scala:156:30, :163:33] wire _T_253 = issue_read_15 | _T_243; // @[regfile.scala:155:32, :165:32] wire issue_read_16; // @[regfile.scala:155:32] wire use_port_16; // @[regfile.scala:156:30] wire _T_257 = ~_T_253 & ~_T_191 & io_arb_read_reqs_2_valid_0; // @[regfile.scala:128:7, :157:{13,26,29,45}, :163:33, :165:32] assign issue_read_16 = _T_257; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_16 = _T_257; // @[regfile.scala:156:30, :157:{26,45}] wire _T_258 = use_port_16 | _T_191; // @[regfile.scala:156:30, :163:33] wire _T_263 = issue_read_16 | _T_253; // @[regfile.scala:155:32, :165:32] wire issue_read_17; // @[regfile.scala:155:32] wire use_port_17; // @[regfile.scala:156:30] wire _T_267 = ~_T_263 & ~_T_201 & io_arb_read_reqs_2_valid_0; // 
@[regfile.scala:128:7, :157:{13,26,29,45}, :163:33, :165:32] assign issue_read_17 = _T_267; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_17 = _T_267; // @[regfile.scala:156:30, :157:{26,45}] assign data_sels_2 = _T_267 ? 6'h20 : _T_257 ? 6'h10 : _T_247 ? 6'h8 : _T_237 ? 6'h4 : _T_227 ? 6'h2 : 6'h1; // @[regfile.scala:149:25, :157:{26,45,75}, :160:22] wire _T_268 = use_port_17 | _T_201; // @[regfile.scala:156:30, :163:33] wire [1:0] _GEN = {1'h0, io_arb_read_reqs_1_valid_0}; // @[regfile.scala:128:7, :167:42] wire [1:0] _GEN_0 = {1'h0, io_arb_read_reqs_0_valid_0} + _GEN; // @[regfile.scala:128:7, :167:42] wire [1:0] _io_arb_read_reqs_2_ready_T; // @[regfile.scala:167:42] assign _io_arb_read_reqs_2_ready_T = _GEN_0; // @[regfile.scala:167:42] wire [1:0] _io_arb_read_reqs_4_ready_T; // @[regfile.scala:167:42] assign _io_arb_read_reqs_4_ready_T = _GEN_0; // @[regfile.scala:167:42] wire [1:0] _io_arb_read_reqs_5_ready_T; // @[regfile.scala:167:42] assign _io_arb_read_reqs_5_ready_T = _GEN_0; // @[regfile.scala:167:42] wire [1:0] _io_arb_read_reqs_8_ready_T; // @[regfile.scala:167:42] assign _io_arb_read_reqs_8_ready_T = _GEN_0; // @[regfile.scala:167:42] wire [1:0] _io_arb_read_reqs_9_ready_T; // @[regfile.scala:167:42] assign _io_arb_read_reqs_9_ready_T = _GEN_0; // @[regfile.scala:167:42] wire [1:0] _io_arb_read_reqs_2_ready_T_1 = _io_arb_read_reqs_2_ready_T; // @[regfile.scala:167:42] wire issue_read_18; // @[regfile.scala:155:32] wire use_port_18; // @[regfile.scala:156:30] wire _T_284 = ~_T_218 & io_arb_read_reqs_3_valid_0; // @[regfile.scala:128:7, :157:{29,45}, :163:33] assign issue_read_18 = _T_284; // @[regfile.scala:155:32, :157:45] assign use_port_18 = _T_284; // @[regfile.scala:156:30, :157:45] wire _T_285 = use_port_18 | _T_218; // @[regfile.scala:156:30, :163:33] wire issue_read_19; // @[regfile.scala:155:32] wire use_port_19; // @[regfile.scala:156:30] wire _T_294 = ~issue_read_18 & ~_T_228 & io_arb_read_reqs_3_valid_0; // @[regfile.scala:128:7, :155:32, :157:{13,26,29,45}, :163:33] assign issue_read_19 = _T_294; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_19 = _T_294; // @[regfile.scala:156:30, :157:{26,45}] wire _T_295 = use_port_19 | _T_228; // @[regfile.scala:156:30, :163:33] wire _T_300 = issue_read_19 | issue_read_18; // @[regfile.scala:155:32, :165:32] wire issue_read_20; // @[regfile.scala:155:32] wire use_port_20; // @[regfile.scala:156:30] wire _T_304 = ~_T_300 & ~_T_238 & io_arb_read_reqs_3_valid_0; // @[regfile.scala:128:7, :157:{13,26,29,45}, :163:33, :165:32] assign issue_read_20 = _T_304; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_20 = _T_304; // @[regfile.scala:156:30, :157:{26,45}] wire _T_305 = use_port_20 | _T_238; // @[regfile.scala:156:30, :163:33] wire _T_310 = issue_read_20 | _T_300; // @[regfile.scala:155:32, :165:32] wire issue_read_21; // @[regfile.scala:155:32] wire use_port_21; // @[regfile.scala:156:30] wire _T_314 = ~_T_310 & ~_T_248 & io_arb_read_reqs_3_valid_0; // @[regfile.scala:128:7, :157:{13,26,29,45}, :163:33, :165:32] assign issue_read_21 = _T_314; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_21 = _T_314; // @[regfile.scala:156:30, :157:{26,45}] wire _T_315 = use_port_21 | _T_248; // @[regfile.scala:156:30, :163:33] wire _T_320 = issue_read_21 | _T_310; // @[regfile.scala:155:32, :165:32] wire issue_read_22; // @[regfile.scala:155:32] wire use_port_22; // @[regfile.scala:156:30] wire _T_324 = ~_T_320 & ~_T_258 & io_arb_read_reqs_3_valid_0; // @[regfile.scala:128:7, :157:{13,26,29,45}, 
:163:33, :165:32] assign issue_read_22 = _T_324; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_22 = _T_324; // @[regfile.scala:156:30, :157:{26,45}] wire _T_325 = use_port_22 | _T_258; // @[regfile.scala:156:30, :163:33] wire _T_330 = issue_read_22 | _T_320; // @[regfile.scala:155:32, :165:32] wire issue_read_23; // @[regfile.scala:155:32] wire use_port_23; // @[regfile.scala:156:30] wire _T_334 = ~_T_330 & ~_T_268 & io_arb_read_reqs_3_valid_0; // @[regfile.scala:128:7, :157:{13,26,29,45}, :163:33, :165:32] assign issue_read_23 = _T_334; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_23 = _T_334; // @[regfile.scala:156:30, :157:{26,45}] assign data_sels_3 = _T_334 ? 6'h20 : _T_324 ? 6'h10 : _T_314 ? 6'h8 : _T_304 ? 6'h4 : _T_294 ? 6'h2 : 6'h1; // @[regfile.scala:149:25, :157:{26,45,75}, :160:22] wire _T_335 = use_port_23 | _T_268; // @[regfile.scala:156:30, :163:33] wire [1:0] _GEN_1 = {1'h0, io_arb_read_reqs_2_valid_0}; // @[regfile.scala:128:7, :167:42] wire [1:0] _GEN_2 = _GEN + _GEN_1; // @[regfile.scala:167:42] wire [1:0] _io_arb_read_reqs_3_ready_T; // @[regfile.scala:167:42] assign _io_arb_read_reqs_3_ready_T = _GEN_2; // @[regfile.scala:167:42] wire [1:0] _io_arb_read_reqs_6_ready_T; // @[regfile.scala:167:42] assign _io_arb_read_reqs_6_ready_T = _GEN_2; // @[regfile.scala:167:42] wire [1:0] _io_arb_read_reqs_7_ready_T; // @[regfile.scala:167:42] assign _io_arb_read_reqs_7_ready_T = _GEN_2; // @[regfile.scala:167:42] wire [1:0] _io_arb_read_reqs_3_ready_T_1 = _io_arb_read_reqs_3_ready_T; // @[regfile.scala:167:42] wire [2:0] _GEN_3 = {2'h0, io_arb_read_reqs_0_valid_0}; // @[regfile.scala:128:7, :167:42] wire [2:0] _io_arb_read_reqs_3_ready_T_2 = _GEN_3 + {1'h0, _io_arb_read_reqs_3_ready_T_1}; // @[regfile.scala:167:42] wire [1:0] _io_arb_read_reqs_3_ready_T_3 = _io_arb_read_reqs_3_ready_T_2[1:0]; // @[regfile.scala:167:42] wire issue_read_24; // @[regfile.scala:155:32] wire use_port_24; // @[regfile.scala:156:30] wire _T_351 = ~_T_285 & io_arb_read_reqs_4_valid_0; // @[regfile.scala:128:7, :157:{29,45}, :163:33] assign issue_read_24 = _T_351; // @[regfile.scala:155:32, :157:45] assign use_port_24 = _T_351; // @[regfile.scala:156:30, :157:45] wire _T_352 = use_port_24 | _T_285; // @[regfile.scala:156:30, :163:33] wire issue_read_25; // @[regfile.scala:155:32] wire use_port_25; // @[regfile.scala:156:30] wire _T_361 = ~issue_read_24 & ~_T_295 & io_arb_read_reqs_4_valid_0; // @[regfile.scala:128:7, :155:32, :157:{13,26,29,45}, :163:33] assign issue_read_25 = _T_361; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_25 = _T_361; // @[regfile.scala:156:30, :157:{26,45}] wire _T_362 = use_port_25 | _T_295; // @[regfile.scala:156:30, :163:33] wire _T_367 = issue_read_25 | issue_read_24; // @[regfile.scala:155:32, :165:32] wire issue_read_26; // @[regfile.scala:155:32] wire use_port_26; // @[regfile.scala:156:30] wire _T_371 = ~_T_367 & ~_T_305 & io_arb_read_reqs_4_valid_0; // @[regfile.scala:128:7, :157:{13,26,29,45}, :163:33, :165:32] assign issue_read_26 = _T_371; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_26 = _T_371; // @[regfile.scala:156:30, :157:{26,45}] wire _T_372 = use_port_26 | _T_305; // @[regfile.scala:156:30, :163:33] wire _T_377 = issue_read_26 | _T_367; // @[regfile.scala:155:32, :165:32] wire issue_read_27; // @[regfile.scala:155:32] wire use_port_27; // @[regfile.scala:156:30] wire _T_381 = ~_T_377 & ~_T_315 & io_arb_read_reqs_4_valid_0; // @[regfile.scala:128:7, :157:{13,26,29,45}, :163:33, :165:32] assign issue_read_27 = 
_T_381; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_27 = _T_381; // @[regfile.scala:156:30, :157:{26,45}] wire _T_382 = use_port_27 | _T_315; // @[regfile.scala:156:30, :163:33] wire _T_387 = issue_read_27 | _T_377; // @[regfile.scala:155:32, :165:32] wire issue_read_28; // @[regfile.scala:155:32] wire use_port_28; // @[regfile.scala:156:30] wire _T_391 = ~_T_387 & ~_T_325 & io_arb_read_reqs_4_valid_0; // @[regfile.scala:128:7, :157:{13,26,29,45}, :163:33, :165:32] assign issue_read_28 = _T_391; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_28 = _T_391; // @[regfile.scala:156:30, :157:{26,45}] wire _T_392 = use_port_28 | _T_325; // @[regfile.scala:156:30, :163:33] wire _T_397 = issue_read_28 | _T_387; // @[regfile.scala:155:32, :165:32] wire issue_read_29; // @[regfile.scala:155:32] wire use_port_29; // @[regfile.scala:156:30] wire _T_401 = ~_T_397 & ~_T_335 & io_arb_read_reqs_4_valid_0; // @[regfile.scala:128:7, :157:{13,26,29,45}, :163:33, :165:32] assign issue_read_29 = _T_401; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_29 = _T_401; // @[regfile.scala:156:30, :157:{26,45}] assign data_sels_4 = _T_401 ? 6'h20 : _T_391 ? 6'h10 : _T_381 ? 6'h8 : _T_371 ? 6'h4 : _T_361 ? 6'h2 : 6'h1; // @[regfile.scala:149:25, :157:{26,45,75}, :160:22] wire _T_402 = use_port_29 | _T_335; // @[regfile.scala:156:30, :163:33] wire [1:0] _io_arb_read_reqs_4_ready_T_1 = _io_arb_read_reqs_4_ready_T; // @[regfile.scala:167:42] wire [1:0] _GEN_4 = {1'h0, io_arb_read_reqs_3_valid_0}; // @[regfile.scala:128:7, :167:42] wire [1:0] _GEN_5 = _GEN_1 + _GEN_4; // @[regfile.scala:167:42] wire [1:0] _io_arb_read_reqs_4_ready_T_2; // @[regfile.scala:167:42] assign _io_arb_read_reqs_4_ready_T_2 = _GEN_5; // @[regfile.scala:167:42] wire [1:0] _io_arb_read_reqs_8_ready_T_2; // @[regfile.scala:167:42] assign _io_arb_read_reqs_8_ready_T_2 = _GEN_5; // @[regfile.scala:167:42] wire [1:0] _io_arb_read_reqs_9_ready_T_2; // @[regfile.scala:167:42] assign _io_arb_read_reqs_9_ready_T_2 = _GEN_5; // @[regfile.scala:167:42] wire [1:0] _io_arb_read_reqs_4_ready_T_3 = _io_arb_read_reqs_4_ready_T_2; // @[regfile.scala:167:42] wire [2:0] _io_arb_read_reqs_4_ready_T_4 = {1'h0, _io_arb_read_reqs_4_ready_T_1} + {1'h0, _io_arb_read_reqs_4_ready_T_3}; // @[regfile.scala:167:42] wire [2:0] _io_arb_read_reqs_4_ready_T_5 = _io_arb_read_reqs_4_ready_T_4; // @[regfile.scala:167:42] assign _io_arb_read_reqs_4_ready_T_6 = _io_arb_read_reqs_4_ready_T_5[2:1] != 2'h3; // @[regfile.scala:167:{42,82}] assign io_arb_read_reqs_4_ready_0 = _io_arb_read_reqs_4_ready_T_6; // @[regfile.scala:128:7, :167:82] wire issue_read_30; // @[regfile.scala:155:32] wire use_port_30; // @[regfile.scala:156:30] wire _T_418 = ~_T_352 & io_arb_read_reqs_5_valid_0; // @[regfile.scala:128:7, :157:{29,45}, :163:33] assign issue_read_30 = _T_418; // @[regfile.scala:155:32, :157:45] assign use_port_30 = _T_418; // @[regfile.scala:156:30, :157:45] wire _T_419 = use_port_30 | _T_352; // @[regfile.scala:156:30, :163:33] wire issue_read_31; // @[regfile.scala:155:32] wire use_port_31; // @[regfile.scala:156:30] wire _T_428 = ~issue_read_30 & ~_T_362 & io_arb_read_reqs_5_valid_0; // @[regfile.scala:128:7, :155:32, :157:{13,26,29,45}, :163:33] assign issue_read_31 = _T_428; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_31 = _T_428; // @[regfile.scala:156:30, :157:{26,45}] wire _T_429 = use_port_31 | _T_362; // @[regfile.scala:156:30, :163:33] wire _T_434 = issue_read_31 | issue_read_30; // @[regfile.scala:155:32, :165:32] wire issue_read_32; 
// @[regfile.scala:155:32] wire use_port_32; // @[regfile.scala:156:30] wire _T_438 = ~_T_434 & ~_T_372 & io_arb_read_reqs_5_valid_0; // @[regfile.scala:128:7, :157:{13,26,29,45}, :163:33, :165:32] assign issue_read_32 = _T_438; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_32 = _T_438; // @[regfile.scala:156:30, :157:{26,45}] wire _T_439 = use_port_32 | _T_372; // @[regfile.scala:156:30, :163:33] wire _T_444 = issue_read_32 | _T_434; // @[regfile.scala:155:32, :165:32] wire issue_read_33; // @[regfile.scala:155:32] wire use_port_33; // @[regfile.scala:156:30] wire _T_448 = ~_T_444 & ~_T_382 & io_arb_read_reqs_5_valid_0; // @[regfile.scala:128:7, :157:{13,26,29,45}, :163:33, :165:32] assign issue_read_33 = _T_448; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_33 = _T_448; // @[regfile.scala:156:30, :157:{26,45}] wire _T_449 = use_port_33 | _T_382; // @[regfile.scala:156:30, :163:33] wire _T_454 = issue_read_33 | _T_444; // @[regfile.scala:155:32, :165:32] wire issue_read_34; // @[regfile.scala:155:32] wire use_port_34; // @[regfile.scala:156:30] wire _T_458 = ~_T_454 & ~_T_392 & io_arb_read_reqs_5_valid_0; // @[regfile.scala:128:7, :157:{13,26,29,45}, :163:33, :165:32] assign issue_read_34 = _T_458; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_34 = _T_458; // @[regfile.scala:156:30, :157:{26,45}] wire _T_459 = use_port_34 | _T_392; // @[regfile.scala:156:30, :163:33] wire _T_464 = issue_read_34 | _T_454; // @[regfile.scala:155:32, :165:32] wire issue_read_35; // @[regfile.scala:155:32] wire use_port_35; // @[regfile.scala:156:30] wire _T_468 = ~_T_464 & ~_T_402 & io_arb_read_reqs_5_valid_0; // @[regfile.scala:128:7, :157:{13,26,29,45}, :163:33, :165:32] assign issue_read_35 = _T_468; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_35 = _T_468; // @[regfile.scala:156:30, :157:{26,45}] assign data_sels_5 = _T_468 ? 6'h20 : _T_458 ? 6'h10 : _T_448 ? 6'h8 : _T_438 ? 6'h4 : _T_428 ? 
6'h2 : 6'h1; // @[regfile.scala:149:25, :157:{26,45,75}, :160:22] wire _T_469 = use_port_35 | _T_402; // @[regfile.scala:156:30, :163:33] wire [1:0] _io_arb_read_reqs_5_ready_T_1 = _io_arb_read_reqs_5_ready_T; // @[regfile.scala:167:42] wire [1:0] _GEN_6 = {1'h0, io_arb_read_reqs_4_valid_0}; // @[regfile.scala:128:7, :167:42] wire [1:0] _GEN_7 = _GEN_4 + _GEN_6; // @[regfile.scala:167:42] wire [1:0] _io_arb_read_reqs_5_ready_T_2; // @[regfile.scala:167:42] assign _io_arb_read_reqs_5_ready_T_2 = _GEN_7; // @[regfile.scala:167:42] wire [1:0] _io_arb_read_reqs_7_ready_T_4; // @[regfile.scala:167:42] assign _io_arb_read_reqs_7_ready_T_4 = _GEN_7; // @[regfile.scala:167:42] wire [1:0] _io_arb_read_reqs_5_ready_T_3 = _io_arb_read_reqs_5_ready_T_2; // @[regfile.scala:167:42] wire [2:0] _io_arb_read_reqs_5_ready_T_4 = {2'h0, io_arb_read_reqs_2_valid_0} + {1'h0, _io_arb_read_reqs_5_ready_T_3}; // @[regfile.scala:128:7, :167:42] wire [1:0] _io_arb_read_reqs_5_ready_T_5 = _io_arb_read_reqs_5_ready_T_4[1:0]; // @[regfile.scala:167:42] wire [2:0] _io_arb_read_reqs_5_ready_T_6 = {1'h0, _io_arb_read_reqs_5_ready_T_1} + {1'h0, _io_arb_read_reqs_5_ready_T_5}; // @[regfile.scala:167:42] wire [2:0] _io_arb_read_reqs_5_ready_T_7 = _io_arb_read_reqs_5_ready_T_6; // @[regfile.scala:167:42] assign _io_arb_read_reqs_5_ready_T_8 = _io_arb_read_reqs_5_ready_T_7[2:1] != 2'h3; // @[regfile.scala:167:{42,82}] assign io_arb_read_reqs_5_ready_0 = _io_arb_read_reqs_5_ready_T_8; // @[regfile.scala:128:7, :167:82] wire issue_read_36; // @[regfile.scala:155:32] wire use_port_36; // @[regfile.scala:156:30] wire _T_485 = ~_T_419 & io_arb_read_reqs_6_valid_0; // @[regfile.scala:128:7, :157:{29,45}, :163:33] assign issue_read_36 = _T_485; // @[regfile.scala:155:32, :157:45] assign use_port_36 = _T_485; // @[regfile.scala:156:30, :157:45] wire _T_486 = use_port_36 | _T_419; // @[regfile.scala:156:30, :163:33] wire issue_read_37; // @[regfile.scala:155:32] wire use_port_37; // @[regfile.scala:156:30] wire _T_495 = ~issue_read_36 & ~_T_429 & io_arb_read_reqs_6_valid_0; // @[regfile.scala:128:7, :155:32, :157:{13,26,29,45}, :163:33] assign issue_read_37 = _T_495; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_37 = _T_495; // @[regfile.scala:156:30, :157:{26,45}] wire _T_496 = use_port_37 | _T_429; // @[regfile.scala:156:30, :163:33] wire _T_501 = issue_read_37 | issue_read_36; // @[regfile.scala:155:32, :165:32] wire issue_read_38; // @[regfile.scala:155:32] wire use_port_38; // @[regfile.scala:156:30] wire _T_505 = ~_T_501 & ~_T_439 & io_arb_read_reqs_6_valid_0; // @[regfile.scala:128:7, :157:{13,26,29,45}, :163:33, :165:32] assign issue_read_38 = _T_505; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_38 = _T_505; // @[regfile.scala:156:30, :157:{26,45}] wire _T_506 = use_port_38 | _T_439; // @[regfile.scala:156:30, :163:33] wire _T_511 = issue_read_38 | _T_501; // @[regfile.scala:155:32, :165:32] wire issue_read_39; // @[regfile.scala:155:32] wire use_port_39; // @[regfile.scala:156:30] wire _T_515 = ~_T_511 & ~_T_449 & io_arb_read_reqs_6_valid_0; // @[regfile.scala:128:7, :157:{13,26,29,45}, :163:33, :165:32] assign issue_read_39 = _T_515; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_39 = _T_515; // @[regfile.scala:156:30, :157:{26,45}] wire _T_516 = use_port_39 | _T_449; // @[regfile.scala:156:30, :163:33] wire _T_521 = issue_read_39 | _T_511; // @[regfile.scala:155:32, :165:32] wire issue_read_40; // @[regfile.scala:155:32] wire use_port_40; // @[regfile.scala:156:30] wire _T_525 = ~_T_521 & 
~_T_459 & io_arb_read_reqs_6_valid_0; // @[regfile.scala:128:7, :157:{13,26,29,45}, :163:33, :165:32] assign issue_read_40 = _T_525; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_40 = _T_525; // @[regfile.scala:156:30, :157:{26,45}] wire _T_526 = use_port_40 | _T_459; // @[regfile.scala:156:30, :163:33] wire _T_531 = issue_read_40 | _T_521; // @[regfile.scala:155:32, :165:32] wire issue_read_41; // @[regfile.scala:155:32] wire use_port_41; // @[regfile.scala:156:30] wire _T_535 = ~_T_531 & ~_T_469 & io_arb_read_reqs_6_valid_0; // @[regfile.scala:128:7, :157:{13,26,29,45}, :163:33, :165:32] assign issue_read_41 = _T_535; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_41 = _T_535; // @[regfile.scala:156:30, :157:{26,45}] assign data_sels_6 = _T_535 ? 6'h20 : _T_525 ? 6'h10 : _T_515 ? 6'h8 : _T_505 ? 6'h4 : _T_495 ? 6'h2 : 6'h1; // @[regfile.scala:149:25, :157:{26,45,75}, :160:22] wire _T_536 = use_port_41 | _T_469; // @[regfile.scala:156:30, :163:33] wire [1:0] _io_arb_read_reqs_6_ready_T_1 = _io_arb_read_reqs_6_ready_T; // @[regfile.scala:167:42] wire [2:0] _io_arb_read_reqs_6_ready_T_2 = _GEN_3 + {1'h0, _io_arb_read_reqs_6_ready_T_1}; // @[regfile.scala:167:42] wire [1:0] _io_arb_read_reqs_6_ready_T_3 = _io_arb_read_reqs_6_ready_T_2[1:0]; // @[regfile.scala:167:42] wire [1:0] _GEN_8 = {1'h0, io_arb_read_reqs_5_valid_0}; // @[regfile.scala:128:7, :167:42] wire [1:0] _GEN_9 = _GEN_6 + _GEN_8; // @[regfile.scala:167:42] wire [1:0] _io_arb_read_reqs_6_ready_T_4; // @[regfile.scala:167:42] assign _io_arb_read_reqs_6_ready_T_4 = _GEN_9; // @[regfile.scala:167:42] wire [1:0] _io_arb_read_reqs_8_ready_T_6; // @[regfile.scala:167:42] assign _io_arb_read_reqs_8_ready_T_6 = _GEN_9; // @[regfile.scala:167:42] wire [1:0] _io_arb_read_reqs_9_ready_T_6; // @[regfile.scala:167:42] assign _io_arb_read_reqs_9_ready_T_6 = _GEN_9; // @[regfile.scala:167:42] wire [1:0] _io_arb_read_reqs_6_ready_T_5 = _io_arb_read_reqs_6_ready_T_4; // @[regfile.scala:167:42] wire [2:0] _io_arb_read_reqs_6_ready_T_6 = {2'h0, io_arb_read_reqs_3_valid_0} + {1'h0, _io_arb_read_reqs_6_ready_T_5}; // @[regfile.scala:128:7, :167:42] wire [1:0] _io_arb_read_reqs_6_ready_T_7 = _io_arb_read_reqs_6_ready_T_6[1:0]; // @[regfile.scala:167:42] wire [2:0] _io_arb_read_reqs_6_ready_T_8 = {1'h0, _io_arb_read_reqs_6_ready_T_3} + {1'h0, _io_arb_read_reqs_6_ready_T_7}; // @[regfile.scala:167:42] wire [2:0] _io_arb_read_reqs_6_ready_T_9 = _io_arb_read_reqs_6_ready_T_8; // @[regfile.scala:167:42] assign _io_arb_read_reqs_6_ready_T_10 = _io_arb_read_reqs_6_ready_T_9[2:1] != 2'h3; // @[regfile.scala:167:{42,82}] assign io_arb_read_reqs_6_ready_0 = _io_arb_read_reqs_6_ready_T_10; // @[regfile.scala:128:7, :167:82] wire issue_read_42; // @[regfile.scala:155:32] wire use_port_42; // @[regfile.scala:156:30] wire _T_552 = ~_T_486 & io_arb_read_reqs_7_valid_0; // @[regfile.scala:128:7, :157:{29,45}, :163:33] assign issue_read_42 = _T_552; // @[regfile.scala:155:32, :157:45] assign use_port_42 = _T_552; // @[regfile.scala:156:30, :157:45] wire _T_553 = use_port_42 | _T_486; // @[regfile.scala:156:30, :163:33] wire issue_read_43; // @[regfile.scala:155:32] wire use_port_43; // @[regfile.scala:156:30] wire _T_562 = ~issue_read_42 & ~_T_496 & io_arb_read_reqs_7_valid_0; // @[regfile.scala:128:7, :155:32, :157:{13,26,29,45}, :163:33] assign issue_read_43 = _T_562; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_43 = _T_562; // @[regfile.scala:156:30, :157:{26,45}] wire _T_563 = use_port_43 | _T_496; // @[regfile.scala:156:30, 
:163:33] wire _T_568 = issue_read_43 | issue_read_42; // @[regfile.scala:155:32, :165:32] wire issue_read_44; // @[regfile.scala:155:32] wire use_port_44; // @[regfile.scala:156:30] wire _T_572 = ~_T_568 & ~_T_506 & io_arb_read_reqs_7_valid_0; // @[regfile.scala:128:7, :157:{13,26,29,45}, :163:33, :165:32] assign issue_read_44 = _T_572; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_44 = _T_572; // @[regfile.scala:156:30, :157:{26,45}] wire _T_573 = use_port_44 | _T_506; // @[regfile.scala:156:30, :163:33] wire _T_578 = issue_read_44 | _T_568; // @[regfile.scala:155:32, :165:32] wire issue_read_45; // @[regfile.scala:155:32] wire use_port_45; // @[regfile.scala:156:30] wire _T_582 = ~_T_578 & ~_T_516 & io_arb_read_reqs_7_valid_0; // @[regfile.scala:128:7, :157:{13,26,29,45}, :163:33, :165:32] assign issue_read_45 = _T_582; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_45 = _T_582; // @[regfile.scala:156:30, :157:{26,45}] wire _T_583 = use_port_45 | _T_516; // @[regfile.scala:156:30, :163:33] wire _T_588 = issue_read_45 | _T_578; // @[regfile.scala:155:32, :165:32] wire issue_read_46; // @[regfile.scala:155:32] wire use_port_46; // @[regfile.scala:156:30] wire _T_592 = ~_T_588 & ~_T_526 & io_arb_read_reqs_7_valid_0; // @[regfile.scala:128:7, :157:{13,26,29,45}, :163:33, :165:32] assign issue_read_46 = _T_592; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_46 = _T_592; // @[regfile.scala:156:30, :157:{26,45}] wire _T_593 = use_port_46 | _T_526; // @[regfile.scala:156:30, :163:33] wire _T_598 = issue_read_46 | _T_588; // @[regfile.scala:155:32, :165:32] wire issue_read_47; // @[regfile.scala:155:32] wire use_port_47; // @[regfile.scala:156:30] wire _T_602 = ~_T_598 & ~_T_536 & io_arb_read_reqs_7_valid_0; // @[regfile.scala:128:7, :157:{13,26,29,45}, :163:33, :165:32] assign issue_read_47 = _T_602; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_47 = _T_602; // @[regfile.scala:156:30, :157:{26,45}] assign data_sels_7 = _T_602 ? 6'h20 : _T_592 ? 6'h10 : _T_582 ? 6'h8 : _T_572 ? 6'h4 : _T_562 ? 
6'h2 : 6'h1; // @[regfile.scala:149:25, :157:{26,45,75}, :160:22] wire _T_603 = use_port_47 | _T_536; // @[regfile.scala:156:30, :163:33] wire [1:0] _io_arb_read_reqs_7_ready_T_1 = _io_arb_read_reqs_7_ready_T; // @[regfile.scala:167:42] wire [2:0] _io_arb_read_reqs_7_ready_T_2 = _GEN_3 + {1'h0, _io_arb_read_reqs_7_ready_T_1}; // @[regfile.scala:167:42] wire [1:0] _io_arb_read_reqs_7_ready_T_3 = _io_arb_read_reqs_7_ready_T_2[1:0]; // @[regfile.scala:167:42] wire [1:0] _io_arb_read_reqs_7_ready_T_5 = _io_arb_read_reqs_7_ready_T_4; // @[regfile.scala:167:42] wire [1:0] _GEN_10 = {1'h0, io_arb_read_reqs_6_valid_0}; // @[regfile.scala:128:7, :167:42] wire [1:0] _io_arb_read_reqs_7_ready_T_6 = _GEN_8 + _GEN_10; // @[regfile.scala:167:42] wire [1:0] _io_arb_read_reqs_7_ready_T_7 = _io_arb_read_reqs_7_ready_T_6; // @[regfile.scala:167:42] wire [2:0] _io_arb_read_reqs_7_ready_T_8 = {1'h0, _io_arb_read_reqs_7_ready_T_5} + {1'h0, _io_arb_read_reqs_7_ready_T_7}; // @[regfile.scala:167:42] wire [2:0] _io_arb_read_reqs_7_ready_T_9 = _io_arb_read_reqs_7_ready_T_8; // @[regfile.scala:167:42] wire [3:0] _io_arb_read_reqs_7_ready_T_10 = {2'h0, _io_arb_read_reqs_7_ready_T_3} + {1'h0, _io_arb_read_reqs_7_ready_T_9}; // @[regfile.scala:167:42] wire [2:0] _io_arb_read_reqs_7_ready_T_11 = _io_arb_read_reqs_7_ready_T_10[2:0]; // @[regfile.scala:167:42] assign _io_arb_read_reqs_7_ready_T_12 = _io_arb_read_reqs_7_ready_T_11[2:1] != 2'h3; // @[regfile.scala:167:{42,82}] assign io_arb_read_reqs_7_ready_0 = _io_arb_read_reqs_7_ready_T_12; // @[regfile.scala:128:7, :167:82] wire issue_read_48; // @[regfile.scala:155:32] wire use_port_48; // @[regfile.scala:156:30] wire _T_619 = ~_T_553 & io_arb_read_reqs_8_valid_0; // @[regfile.scala:128:7, :157:{29,45}, :163:33] assign issue_read_48 = _T_619; // @[regfile.scala:155:32, :157:45] assign use_port_48 = _T_619; // @[regfile.scala:156:30, :157:45] wire _T_620 = use_port_48 | _T_553; // @[regfile.scala:156:30, :163:33] wire issue_read_49; // @[regfile.scala:155:32] wire use_port_49; // @[regfile.scala:156:30] wire _T_629 = ~issue_read_48 & ~_T_563 & io_arb_read_reqs_8_valid_0; // @[regfile.scala:128:7, :155:32, :157:{13,26,29,45}, :163:33] assign issue_read_49 = _T_629; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_49 = _T_629; // @[regfile.scala:156:30, :157:{26,45}] wire _T_630 = use_port_49 | _T_563; // @[regfile.scala:156:30, :163:33] wire _T_635 = issue_read_49 | issue_read_48; // @[regfile.scala:155:32, :165:32] wire issue_read_50; // @[regfile.scala:155:32] wire use_port_50; // @[regfile.scala:156:30] wire _T_639 = ~_T_635 & ~_T_573 & io_arb_read_reqs_8_valid_0; // @[regfile.scala:128:7, :157:{13,26,29,45}, :163:33, :165:32] assign issue_read_50 = _T_639; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_50 = _T_639; // @[regfile.scala:156:30, :157:{26,45}] wire _T_640 = use_port_50 | _T_573; // @[regfile.scala:156:30, :163:33] wire _T_645 = issue_read_50 | _T_635; // @[regfile.scala:155:32, :165:32] wire issue_read_51; // @[regfile.scala:155:32] wire use_port_51; // @[regfile.scala:156:30] wire _T_649 = ~_T_645 & ~_T_583 & io_arb_read_reqs_8_valid_0; // @[regfile.scala:128:7, :157:{13,26,29,45}, :163:33, :165:32] assign issue_read_51 = _T_649; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_51 = _T_649; // @[regfile.scala:156:30, :157:{26,45}] wire _T_650 = use_port_51 | _T_583; // @[regfile.scala:156:30, :163:33] wire _T_655 = issue_read_51 | _T_645; // @[regfile.scala:155:32, :165:32] wire issue_read_52; // @[regfile.scala:155:32] wire 
use_port_52; // @[regfile.scala:156:30] wire _T_659 = ~_T_655 & ~_T_593 & io_arb_read_reqs_8_valid_0; // @[regfile.scala:128:7, :157:{13,26,29,45}, :163:33, :165:32] assign issue_read_52 = _T_659; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_52 = _T_659; // @[regfile.scala:156:30, :157:{26,45}] wire _T_660 = use_port_52 | _T_593; // @[regfile.scala:156:30, :163:33] wire _T_665 = issue_read_52 | _T_655; // @[regfile.scala:155:32, :165:32] wire issue_read_53; // @[regfile.scala:155:32] wire use_port_53; // @[regfile.scala:156:30] wire _T_669 = ~_T_665 & ~_T_603 & io_arb_read_reqs_8_valid_0; // @[regfile.scala:128:7, :157:{13,26,29,45}, :163:33, :165:32] assign issue_read_53 = _T_669; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_53 = _T_669; // @[regfile.scala:156:30, :157:{26,45}] assign data_sels_8 = _T_669 ? 6'h20 : _T_659 ? 6'h10 : _T_649 ? 6'h8 : _T_639 ? 6'h4 : _T_629 ? 6'h2 : 6'h1; // @[regfile.scala:149:25, :157:{26,45,75}, :160:22] wire _T_670 = use_port_53 | _T_603; // @[regfile.scala:156:30, :163:33] wire [1:0] _io_arb_read_reqs_8_ready_T_1 = _io_arb_read_reqs_8_ready_T; // @[regfile.scala:167:42] wire [1:0] _io_arb_read_reqs_8_ready_T_3 = _io_arb_read_reqs_8_ready_T_2; // @[regfile.scala:167:42] wire [2:0] _io_arb_read_reqs_8_ready_T_4 = {1'h0, _io_arb_read_reqs_8_ready_T_1} + {1'h0, _io_arb_read_reqs_8_ready_T_3}; // @[regfile.scala:167:42] wire [2:0] _io_arb_read_reqs_8_ready_T_5 = _io_arb_read_reqs_8_ready_T_4; // @[regfile.scala:167:42] wire [1:0] _io_arb_read_reqs_8_ready_T_7 = _io_arb_read_reqs_8_ready_T_6; // @[regfile.scala:167:42] wire [1:0] _GEN_11 = {1'h0, io_arb_read_reqs_7_valid_0}; // @[regfile.scala:128:7, :167:42] wire [1:0] _io_arb_read_reqs_8_ready_T_8 = _GEN_10 + _GEN_11; // @[regfile.scala:167:42] wire [1:0] _io_arb_read_reqs_8_ready_T_9 = _io_arb_read_reqs_8_ready_T_8; // @[regfile.scala:167:42] wire [2:0] _io_arb_read_reqs_8_ready_T_10 = {1'h0, _io_arb_read_reqs_8_ready_T_7} + {1'h0, _io_arb_read_reqs_8_ready_T_9}; // @[regfile.scala:167:42] wire [2:0] _io_arb_read_reqs_8_ready_T_11 = _io_arb_read_reqs_8_ready_T_10; // @[regfile.scala:167:42] wire [3:0] _io_arb_read_reqs_8_ready_T_12 = {1'h0, _io_arb_read_reqs_8_ready_T_5} + {1'h0, _io_arb_read_reqs_8_ready_T_11}; // @[regfile.scala:167:42] wire [3:0] _io_arb_read_reqs_8_ready_T_13 = _io_arb_read_reqs_8_ready_T_12; // @[regfile.scala:167:42] assign _io_arb_read_reqs_8_ready_T_14 = _io_arb_read_reqs_8_ready_T_13 < 4'h6; // @[regfile.scala:167:{42,82}] assign io_arb_read_reqs_8_ready_0 = _io_arb_read_reqs_8_ready_T_14; // @[regfile.scala:128:7, :167:82] wire issue_read_54; // @[regfile.scala:155:32] wire use_port_54; // @[regfile.scala:156:30] wire _T_686 = ~_T_620 & io_arb_read_reqs_9_valid_0; // @[regfile.scala:128:7, :157:{29,45}, :163:33] assign issue_read_54 = _T_686; // @[regfile.scala:155:32, :157:45] assign use_port_54 = _T_686; // @[regfile.scala:156:30, :157:45] wire issue_read_55; // @[regfile.scala:155:32] wire use_port_55; // @[regfile.scala:156:30] wire _T_696 = ~issue_read_54 & ~_T_630 & io_arb_read_reqs_9_valid_0; // @[regfile.scala:128:7, :155:32, :157:{13,26,29,45}, :163:33] assign issue_read_55 = _T_696; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_55 = _T_696; // @[regfile.scala:156:30, :157:{26,45}] wire _T_702 = issue_read_55 | issue_read_54; // @[regfile.scala:155:32, :165:32] wire issue_read_56; // @[regfile.scala:155:32] wire use_port_56; // @[regfile.scala:156:30] wire _T_706 = ~_T_702 & ~_T_640 & io_arb_read_reqs_9_valid_0; // @[regfile.scala:128:7, 
:157:{13,26,29,45}, :163:33, :165:32] assign issue_read_56 = _T_706; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_56 = _T_706; // @[regfile.scala:156:30, :157:{26,45}] wire _T_712 = issue_read_56 | _T_702; // @[regfile.scala:155:32, :165:32] wire issue_read_57; // @[regfile.scala:155:32] wire use_port_57; // @[regfile.scala:156:30] wire _T_716 = ~_T_712 & ~_T_650 & io_arb_read_reqs_9_valid_0; // @[regfile.scala:128:7, :157:{13,26,29,45}, :163:33, :165:32] assign issue_read_57 = _T_716; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_57 = _T_716; // @[regfile.scala:156:30, :157:{26,45}] wire _T_722 = issue_read_57 | _T_712; // @[regfile.scala:155:32, :165:32] wire issue_read_58; // @[regfile.scala:155:32] wire use_port_58; // @[regfile.scala:156:30] wire _T_726 = ~_T_722 & ~_T_660 & io_arb_read_reqs_9_valid_0; // @[regfile.scala:128:7, :157:{13,26,29,45}, :163:33, :165:32] assign issue_read_58 = _T_726; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_58 = _T_726; // @[regfile.scala:156:30, :157:{26,45}] wire _T_732 = issue_read_58 | _T_722; // @[regfile.scala:155:32, :165:32] wire issue_read_59; // @[regfile.scala:155:32] wire use_port_59; // @[regfile.scala:156:30] wire _T_736 = ~_T_732 & ~_T_670 & io_arb_read_reqs_9_valid_0; // @[regfile.scala:128:7, :157:{13,26,29,45}, :163:33, :165:32] assign issue_read_59 = _T_736; // @[regfile.scala:155:32, :157:{26,45}] assign use_port_59 = _T_736; // @[regfile.scala:156:30, :157:{26,45}] assign data_sels_9 = _T_736 ? 6'h20 : _T_726 ? 6'h10 : _T_716 ? 6'h8 : _T_706 ? 6'h4 : _T_696 ? 6'h2 : 6'h1; // @[regfile.scala:149:25, :157:{26,45,75}, :160:22] wire [1:0] _io_arb_read_reqs_9_ready_T_1 = _io_arb_read_reqs_9_ready_T; // @[regfile.scala:167:42] wire [1:0] _io_arb_read_reqs_9_ready_T_3 = _io_arb_read_reqs_9_ready_T_2; // @[regfile.scala:167:42] wire [2:0] _io_arb_read_reqs_9_ready_T_4 = {1'h0, _io_arb_read_reqs_9_ready_T_1} + {1'h0, _io_arb_read_reqs_9_ready_T_3}; // @[regfile.scala:167:42] wire [2:0] _io_arb_read_reqs_9_ready_T_5 = _io_arb_read_reqs_9_ready_T_4; // @[regfile.scala:167:42] wire [1:0] _io_arb_read_reqs_9_ready_T_7 = _io_arb_read_reqs_9_ready_T_6; // @[regfile.scala:167:42] wire [1:0] _io_arb_read_reqs_9_ready_T_8 = _GEN_11 + {1'h0, io_arb_read_reqs_8_valid_0}; // @[regfile.scala:128:7, :167:42] wire [1:0] _io_arb_read_reqs_9_ready_T_9 = _io_arb_read_reqs_9_ready_T_8; // @[regfile.scala:167:42] wire [2:0] _io_arb_read_reqs_9_ready_T_10 = {2'h0, io_arb_read_reqs_6_valid_0} + {1'h0, _io_arb_read_reqs_9_ready_T_9}; // @[regfile.scala:128:7, :167:42] wire [1:0] _io_arb_read_reqs_9_ready_T_11 = _io_arb_read_reqs_9_ready_T_10[1:0]; // @[regfile.scala:167:42] wire [2:0] _io_arb_read_reqs_9_ready_T_12 = {1'h0, _io_arb_read_reqs_9_ready_T_7} + {1'h0, _io_arb_read_reqs_9_ready_T_11}; // @[regfile.scala:167:42] wire [2:0] _io_arb_read_reqs_9_ready_T_13 = _io_arb_read_reqs_9_ready_T_12; // @[regfile.scala:167:42] wire [3:0] _io_arb_read_reqs_9_ready_T_14 = {1'h0, _io_arb_read_reqs_9_ready_T_5} + {1'h0, _io_arb_read_reqs_9_ready_T_13}; // @[regfile.scala:167:42] wire [3:0] _io_arb_read_reqs_9_ready_T_15 = _io_arb_read_reqs_9_ready_T_14; // @[regfile.scala:167:42] assign _io_arb_read_reqs_9_ready_T_16 = _io_arb_read_reqs_9_ready_T_15 < 4'h6; // @[regfile.scala:167:{42,82}] assign io_arb_read_reqs_9_ready_0 = _io_arb_read_reqs_9_ready_T_16; // @[regfile.scala:128:7, :167:82]
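The ready logic above (the chained adders and the final comparisons such as `< 4'h6`, all annotated with regfile.scala:167) grants requester N only while the number of already-valid earlier requesters stays below the number of physical read ports. The regfile.scala source itself is not included in this document, so the following is only a minimal Chisel sketch of that counting pattern; the module and port names (ReadyFromPriorValidsSketch, valids, readys, ports) are illustrative, not taken from the source.

// Minimal sketch, assuming the generated sums implement a "fewer than
// `ports` earlier valid requesters" check; not the actual regfile.scala code.
import chisel3._
import chisel3.util._

class ReadyFromPriorValidsSketch(reqs: Int, ports: Int) extends Module {
  val io = IO(new Bundle {
    val valids = Input(Vec(reqs, Bool()))   // one valid per read requester
    val readys = Output(Vec(reqs, Bool()))  // ready granted to each requester
  })
  for (i <- 0 until reqs) {
    // Count how many earlier requesters are already asking for a port.
    val prior = if (i == 0) 0.U else PopCount(io.valids.take(i))
    io.readys(i) := prior < ports.U
  }
}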
Generate the Verilog code corresponding to the following Chisel files. File RegisterFile.scala: package saturn.backend import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import freechips.rocketchip.tile.{CoreModule} import freechips.rocketchip.util._ import saturn.common._ class OldestRRArbiter(val n: Int)(implicit p: Parameters) extends Module { val io = IO(new ArbiterIO(new VectorReadReq, n)) val arb = Module(new RRArbiter(new VectorReadReq, n)) io <> arb.io val oldest_oh = io.in.map(i => i.valid && i.bits.oldest) //assert(PopCount(oldest_oh) <= 1.U) when (oldest_oh.orR) { io.chosen := VecInit(oldest_oh).asUInt io.out.valid := true.B io.out.bits := Mux1H(oldest_oh, io.in.map(_.bits)) for (i <- 0 until n) { io.in(i).ready := oldest_oh(i) && io.out.ready } } } class RegisterReadXbar(n: Int, banks: Int)(implicit p: Parameters) extends CoreModule()(p) with HasVectorParams { val io = IO(new Bundle { val in = Vec(n, Flipped(new VectorReadIO)) val out = Vec(banks, new VectorReadIO) }) val arbs = Seq.fill(banks) { Module(new OldestRRArbiter(n)) } for (i <- 0 until banks) { io.out(i).req <> arbs(i).io.out } val bankOffset = log2Ceil(banks) for (i <- 0 until n) { val bank_sel = if (bankOffset == 0) true.B else UIntToOH(io.in(i).req.bits.eg(bankOffset-1,0)) for (j <- 0 until banks) { arbs(j).io.in(i).valid := io.in(i).req.valid && bank_sel(j) arbs(j).io.in(i).bits.eg := io.in(i).req.bits.eg >> bankOffset arbs(j).io.in(i).bits.oldest := io.in(i).req.bits.oldest } io.in(i).req.ready := Mux1H(bank_sel, arbs.map(_.io.in(i).ready)) io.in(i).resp := Mux1H(bank_sel, io.out.map(_.resp)) } } class RegisterFileBank(reads: Int, maskReads: Int, rows: Int, maskRows: Int)(implicit p: Parameters) extends CoreModule()(p) with HasVectorParams { val io = IO(new Bundle { val read = Vec(reads, Flipped(new VectorReadIO)) val mask_read = Vec(maskReads, Flipped(new VectorReadIO)) val write = Input(Valid(new VectorWrite(dLen))) val ll_write = Flipped(Decoupled(new VectorWrite(dLen))) }) val ll_write_valid = RegInit(false.B) val ll_write_bits = Reg(new VectorWrite(dLen)) val vrf = Mem(rows, Vec(dLen, Bool())) val v0_mask = Mem(maskRows, Vec(dLen, Bool())) for (read <- io.read) { read.req.ready := !(ll_write_valid && read.req.bits.eg === ll_write_bits.eg) read.resp := DontCare when (read.req.valid) { read.resp := vrf.read(read.req.bits.eg).asUInt } } for (mask_read <- io.mask_read) { mask_read.req.ready := !(ll_write_valid && mask_read.req.bits.eg === ll_write_bits.eg) mask_read.resp := DontCare when (mask_read.req.valid) { mask_read.resp := v0_mask.read(mask_read.req.bits.eg).asUInt } } val write = WireInit(io.write) io.ll_write.ready := false.B if (vParams.vrfHiccupBuffer) { when (!io.write.valid) { // drain hiccup buffer write.valid := ll_write_valid || io.ll_write.valid write.bits := Mux(ll_write_valid, ll_write_bits, io.ll_write.bits) ll_write_valid := false.B when (io.ll_write.valid && ll_write_valid) { ll_write_valid := true.B ll_write_bits := io.ll_write.bits } io.ll_write.ready := true.B } .elsewhen (!ll_write_valid) { // fill hiccup buffer when (io.ll_write.valid) { ll_write_valid := true.B ll_write_bits := io.ll_write.bits } io.ll_write.ready := true.B } } else { when (!io.write.valid) { io.ll_write.ready := true.B write.valid := io.ll_write.valid write.bits := io.ll_write.bits } } when (write.valid) { vrf.write( write.bits.eg, VecInit(write.bits.data.asBools), write.bits.mask.asBools) when (write.bits.eg < maskRows.U) { v0_mask.write( write.bits.eg, VecInit(write.bits.data.asBools), 
write.bits.mask.asBools) } } } class RegisterFile(reads: Seq[Int], maskReads: Seq[Int], pipeWrites: Int, llWrites: Int)(implicit p: Parameters) extends CoreModule()(p) with HasVectorParams { val nBanks = vParams.vrfBanking // Support 1, 2, and 4 banks for the VRF require(nBanks == 1 || nBanks == 2 || nBanks == 4) val io = IO(new Bundle { val read = MixedVec(reads.map(rc => Vec(rc, Flipped(new VectorReadIO)))) val mask_read = MixedVec(maskReads.map(rc => Vec(rc, Flipped(new VectorReadIO)))) val pipe_writes = Vec(pipeWrites, Input(Valid(new VectorWrite(dLen)))) val ll_writes = Vec(llWrites, Flipped(Decoupled(new VectorWrite(dLen)))) }) val vrf = Seq.fill(nBanks) { Module(new RegisterFileBank(reads.size, maskReads.size, egsTotal/nBanks, if (egsPerVReg < nBanks) 1 else egsPerVReg / nBanks)) } reads.zipWithIndex.foreach { case (rc, i) => val xbar = Module(new RegisterReadXbar(rc, nBanks)) vrf.zipWithIndex.foreach { case (bank, j) => bank.io.read(i) <> xbar.io.out(j) } xbar.io.in <> io.read(i) } maskReads.zipWithIndex.foreach { case (rc, i) => val mask_xbar = Module(new RegisterReadXbar(rc, nBanks)) vrf.zipWithIndex.foreach { case (bank, j) => bank.io.mask_read(i) <> mask_xbar.io.out(j) } mask_xbar.io.in <> io.mask_read(i) } io.ll_writes.foreach(_.ready := false.B) vrf.zipWithIndex.foreach { case (rf, i) => val bank_match = io.pipe_writes.map { w => (w.bits.bankId === i.U) && w.valid } val bank_write_data = Mux1H(bank_match, io.pipe_writes.map(_.bits.data)) val bank_write_mask = Mux1H(bank_match, io.pipe_writes.map(_.bits.mask)) val bank_write_eg = Mux1H(bank_match, io.pipe_writes.map(_.bits.eg)) val bank_write_valid = bank_match.orR rf.io.write.valid := bank_write_valid rf.io.write.bits.data := bank_write_data rf.io.write.bits.mask := bank_write_mask rf.io.write.bits.eg := bank_write_eg >> vrfBankBits when (bank_write_valid) { PopCount(bank_match) === 1.U } val ll_arb = Module(new Arbiter(new VectorWrite(dLen), llWrites)) rf.io.ll_write <> ll_arb.io.out io.ll_writes.zipWithIndex.foreach { case (w, j) => ll_arb.io.in(j).valid := w.valid && w.bits.bankId === i.U ll_arb.io.in(j).bits.eg := w.bits.eg >> vrfBankBits ll_arb.io.in(j).bits.data := w.bits.data ll_arb.io.in(j).bits.mask := w.bits.mask when (ll_arb.io.in(j).ready && w.bits.bankId === i.U) { w.ready := true.B } } } }
module OldestRRArbiter_4( // @[RegisterFile.scala:10:7]
  input        clock, // @[RegisterFile.scala:10:7]
  output       io_in_0_ready, // @[RegisterFile.scala:11:14]
  input        io_in_0_valid, // @[RegisterFile.scala:11:14]
  input  [5:0] io_in_0_bits_eg, // @[RegisterFile.scala:11:14]
  input        io_in_0_bits_oldest, // @[RegisterFile.scala:11:14]
  output       io_in_1_ready, // @[RegisterFile.scala:11:14]
  input        io_in_1_valid, // @[RegisterFile.scala:11:14]
  input  [5:0] io_in_1_bits_eg, // @[RegisterFile.scala:11:14]
  input        io_in_1_bits_oldest, // @[RegisterFile.scala:11:14]
  input        io_out_ready, // @[RegisterFile.scala:11:14]
  output       io_out_valid, // @[RegisterFile.scala:11:14]
  output [5:0] io_out_bits_eg // @[RegisterFile.scala:11:14]
);

  wire       _arb_io_in_0_ready; // @[RegisterFile.scala:13:19]
  wire       _arb_io_in_1_ready; // @[RegisterFile.scala:13:19]
  wire       _arb_io_out_valid; // @[RegisterFile.scala:13:19]
  wire [5:0] _arb_io_out_bits_eg; // @[RegisterFile.scala:13:19]
  wire       oldest_oh_0 = io_in_0_valid & io_in_0_bits_oldest; // @[RegisterFile.scala:15:42]
  wire       oldest_oh_1 = io_in_1_valid & io_in_1_bits_oldest; // @[RegisterFile.scala:15:42]
  wire       _GEN = oldest_oh_0 | oldest_oh_1; // @[RegisterFile.scala:15:42]
  RRArbiter_4 arb ( // @[RegisterFile.scala:13:19]
    .clock           (clock),
    .io_in_0_ready   (_arb_io_in_0_ready),
    .io_in_0_valid   (io_in_0_valid),
    .io_in_0_bits_eg (io_in_0_bits_eg),
    .io_in_1_ready   (_arb_io_in_1_ready),
    .io_in_1_valid   (io_in_1_valid),
    .io_in_1_bits_eg (io_in_1_bits_eg),
    .io_out_ready    (io_out_ready),
    .io_out_valid    (_arb_io_out_valid),
    .io_out_bits_eg  (_arb_io_out_bits_eg)
  ); // @[RegisterFile.scala:13:19]
  assign io_in_0_ready = _GEN ? oldest_oh_0 & io_out_ready : _arb_io_in_0_ready; // @[RegisterFile.scala:10:7, :13:19, :14:6, :15:42, :17:24, :22:{22,38}]
  assign io_in_1_ready = _GEN ? oldest_oh_1 & io_out_ready : _arb_io_in_1_ready; // @[RegisterFile.scala:10:7, :13:19, :14:6, :15:42, :17:24, :22:{22,38}]
  assign io_out_valid = _GEN | _arb_io_out_valid; // @[RegisterFile.scala:10:7, :13:19, :14:6, :17:24, :19:18]
  assign io_out_bits_eg = _GEN ? (oldest_oh_0 ? io_in_0_bits_eg : 6'h0) | (oldest_oh_1 ? io_in_1_bits_eg : 6'h0) : _arb_io_out_bits_eg; // @[Mux.scala:30:73]
endmodule
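The generated module makes the oldest-first override in OldestRRArbiter explicit: whenever any input asserts its oldest flag (_GEN), the round-robin grants from RRArbiter_4 are bypassed and the one-hot oldest_oh vector directly drives io_out_valid, each io_in_*_ready, and the Mux1H-style select of io_out_bits_eg. The snippet below is a minimal, self-contained sketch of that same connection pattern for a plain UInt payload; OldestFirstArbiterSketch and its oldest sideband input are illustrative names only, whereas the saturn code carries the flag inside VectorReadReq as bits.oldest.

// Minimal sketch of "round-robin arbiter with an oldest-request override";
// not the saturn implementation, just the same last-connect override pattern.
import chisel3._
import chisel3.util._

class OldestFirstArbiterSketch(n: Int, w: Int) extends Module {
  val io = IO(new Bundle {
    val in     = Vec(n, Flipped(Decoupled(UInt(w.W))))
    val oldest = Input(Vec(n, Bool()))   // assumed sideband "I am oldest" flags
    val out    = Decoupled(UInt(w.W))
  })
  val arb = Module(new RRArbiter(UInt(w.W), n))
  arb.io.in <> io.in          // default behaviour: plain round-robin
  io.out <> arb.io.out
  val oldest_oh = io.in.zip(io.oldest).map { case (i, o) => i.valid && o }
  when (oldest_oh.reduce(_ || _)) {
    // Override the round-robin grant: the oldest requester wins outright.
    io.out.valid := true.B
    io.out.bits  := Mux1H(oldest_oh, io.in.map(_.bits))
    for (i <- 0 until n) { io.in(i).ready := oldest_oh(i) && io.out.ready }
  }
}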
Generate the Verilog code corresponding to the following Chisel files. File IngressUnit.scala: package constellation.router import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.{Field, Parameters} import freechips.rocketchip.util._ import constellation.channel._ class IngressUnit( ingressNodeId: Int, cParam: IngressChannelParams, outParams: Seq[ChannelParams], egressParams: Seq[EgressChannelParams], combineRCVA: Boolean, combineSAST: Boolean, ) (implicit p: Parameters) extends AbstractInputUnit(cParam, outParams, egressParams)(p) { class IngressUnitIO extends AbstractInputUnitIO(cParam, outParams, egressParams) { val in = Flipped(Decoupled(new IngressFlit(cParam.payloadBits))) } val io = IO(new IngressUnitIO) val route_buffer = Module(new Queue(new Flit(cParam.payloadBits), 2)) val route_q = Module(new Queue(new RouteComputerResp(outParams, egressParams), 2, flow=combineRCVA)) assert(!(io.in.valid && !cParam.possibleFlows.toSeq.map(_.egressId.U === io.in.bits.egress_id).orR)) route_buffer.io.enq.bits.head := io.in.bits.head route_buffer.io.enq.bits.tail := io.in.bits.tail val flows = cParam.possibleFlows.toSeq if (flows.size == 0) { route_buffer.io.enq.bits.flow := DontCare } else { route_buffer.io.enq.bits.flow.ingress_node := cParam.destId.U route_buffer.io.enq.bits.flow.ingress_node_id := ingressNodeId.U route_buffer.io.enq.bits.flow.vnet_id := cParam.vNetId.U route_buffer.io.enq.bits.flow.egress_node := Mux1H( flows.map(_.egressId.U === io.in.bits.egress_id), flows.map(_.egressNode.U) ) route_buffer.io.enq.bits.flow.egress_node_id := Mux1H( flows.map(_.egressId.U === io.in.bits.egress_id), flows.map(_.egressNodeId.U) ) } route_buffer.io.enq.bits.payload := io.in.bits.payload route_buffer.io.enq.bits.virt_channel_id := DontCare io.router_req.bits.src_virt_id := 0.U io.router_req.bits.flow := route_buffer.io.enq.bits.flow val at_dest = route_buffer.io.enq.bits.flow.egress_node === nodeId.U route_buffer.io.enq.valid := io.in.valid && ( io.router_req.ready || !io.in.bits.head || at_dest) io.router_req.valid := io.in.valid && route_buffer.io.enq.ready && io.in.bits.head && !at_dest io.in.ready := route_buffer.io.enq.ready && ( io.router_req.ready || !io.in.bits.head || at_dest) route_q.io.enq.valid := io.router_req.fire route_q.io.enq.bits := io.router_resp when (io.in.fire && io.in.bits.head && at_dest) { route_q.io.enq.valid := true.B route_q.io.enq.bits.vc_sel.foreach(_.foreach(_ := false.B)) for (o <- 0 until nEgress) { when (egressParams(o).egressId.U === io.in.bits.egress_id) { route_q.io.enq.bits.vc_sel(o+nOutputs)(0) := true.B } } } assert(!(route_q.io.enq.valid && !route_q.io.enq.ready)) val vcalloc_buffer = Module(new Queue(new Flit(cParam.payloadBits), 2)) val vcalloc_q = Module(new Queue(new VCAllocResp(outParams, egressParams), 1, pipe=true)) vcalloc_buffer.io.enq.bits := route_buffer.io.deq.bits io.vcalloc_req.bits.vc_sel := route_q.io.deq.bits.vc_sel io.vcalloc_req.bits.flow := route_buffer.io.deq.bits.flow io.vcalloc_req.bits.in_vc := 0.U val head = route_buffer.io.deq.bits.head val tail = route_buffer.io.deq.bits.tail vcalloc_buffer.io.enq.valid := (route_buffer.io.deq.valid && (route_q.io.deq.valid || !head) && (io.vcalloc_req.ready || !head) ) io.vcalloc_req.valid := (route_buffer.io.deq.valid && route_q.io.deq.valid && head && vcalloc_buffer.io.enq.ready && vcalloc_q.io.enq.ready) route_buffer.io.deq.ready := (vcalloc_buffer.io.enq.ready && (route_q.io.deq.valid || !head) && (io.vcalloc_req.ready || !head) && (vcalloc_q.io.enq.ready || !head)) 
route_q.io.deq.ready := (route_buffer.io.deq.fire && tail) vcalloc_q.io.enq.valid := io.vcalloc_req.fire vcalloc_q.io.enq.bits := io.vcalloc_resp assert(!(vcalloc_q.io.enq.valid && !vcalloc_q.io.enq.ready)) io.salloc_req(0).bits.vc_sel := vcalloc_q.io.deq.bits.vc_sel io.salloc_req(0).bits.tail := vcalloc_buffer.io.deq.bits.tail val c = (vcalloc_q.io.deq.bits.vc_sel.asUInt & io.out_credit_available.asUInt) =/= 0.U val vcalloc_tail = vcalloc_buffer.io.deq.bits.tail io.salloc_req(0).valid := vcalloc_buffer.io.deq.valid && vcalloc_q.io.deq.valid && c && !io.block vcalloc_buffer.io.deq.ready := io.salloc_req(0).ready && vcalloc_q.io.deq.valid && c && !io.block vcalloc_q.io.deq.ready := vcalloc_tail && vcalloc_buffer.io.deq.fire val out_bundle = if (combineSAST) { Wire(Valid(new SwitchBundle(outParams, egressParams))) } else { Reg(Valid(new SwitchBundle(outParams, egressParams))) } io.out(0) := out_bundle out_bundle.valid := vcalloc_buffer.io.deq.fire out_bundle.bits.flit := vcalloc_buffer.io.deq.bits out_bundle.bits.flit.virt_channel_id := 0.U val out_channel_oh = vcalloc_q.io.deq.bits.vc_sel.map(_.reduce(_||_)).toSeq out_bundle.bits.out_virt_channel := Mux1H(out_channel_oh, vcalloc_q.io.deq.bits.vc_sel.map(v => OHToUInt(v)).toSeq) io.debug.va_stall := io.vcalloc_req.valid && !io.vcalloc_req.ready io.debug.sa_stall := io.salloc_req(0).valid && !io.salloc_req(0).ready // TODO: We should not generate input/ingress/output/egress units for untraversable channels if (!cParam.traversable) { io.in.ready := false.B io.router_req.valid := false.B io.router_req.bits := DontCare io.vcalloc_req.valid := false.B io.vcalloc_req.bits := DontCare io.salloc_req.foreach(_.valid := false.B) io.salloc_req.foreach(_.bits := DontCare) io.out.foreach(_.valid := false.B) io.out.foreach(_.bits := DontCare) } }
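Note how IngressUnit forks each incoming head flit to two consumers at once: the flit is enqueued into route_buffer while a request is raised toward the route computer, and neither side fires unless the other can also accept the beat (the real code additionally waives the route request for non-head flits and for flits already at their destination). The snippet below is a generic sketch of that fork handshake with assumed names (DecoupledForkSketch, outA, outB); it omits those head/at_dest qualifications.

// Minimal sketch of a two-way decoupled fork: both consumers see the same
// beat, and the producer is drained only when both accept it.
import chisel3._
import chisel3.util._

class DecoupledForkSketch(w: Int) extends Module {
  val io = IO(new Bundle {
    val in   = Flipped(Decoupled(UInt(w.W)))
    val outA = Decoupled(UInt(w.W))   // e.g. the flit buffer
    val outB = Decoupled(UInt(w.W))   // e.g. the route-computer request
  })
  io.outA.bits := io.in.bits
  io.outB.bits := io.in.bits
  // Each output is only valid if the other output can also take the beat;
  // consumers must not make ready combinationally depend on valid, or this
  // cross-coupling becomes a combinational loop.
  io.outA.valid := io.in.valid && io.outB.ready
  io.outB.valid := io.in.valid && io.outA.ready
  io.in.ready   := io.outA.ready && io.outB.ready
}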
module IngressUnit_35( // @[IngressUnit.scala:11:7] input clock, // @[IngressUnit.scala:11:7] input reset, // @[IngressUnit.scala:11:7] input io_vcalloc_req_ready, // @[IngressUnit.scala:24:14] output io_vcalloc_req_valid, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_2_0, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_1_0, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_0, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_1, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_2, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_3, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_4, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_5, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_6, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_7, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_8, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_9, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_10, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_11, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_12, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_13, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_14, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_15, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_16, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_17, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_18, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_19, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_20, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_21, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_2_0, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_1_0, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_0, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_1, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_2, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_3, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_4, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_5, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_6, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_7, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_8, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_9, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_10, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_11, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_12, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_13, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_14, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_15, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_16, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_17, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_18, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_19, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_20, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_21, // @[IngressUnit.scala:24:14] input io_out_credit_available_2_0, // @[IngressUnit.scala:24:14] input 
io_out_credit_available_1_0, // @[IngressUnit.scala:24:14] input io_out_credit_available_0_10, // @[IngressUnit.scala:24:14] input io_out_credit_available_0_11, // @[IngressUnit.scala:24:14] input io_out_credit_available_0_14, // @[IngressUnit.scala:24:14] input io_out_credit_available_0_15, // @[IngressUnit.scala:24:14] input io_out_credit_available_0_18, // @[IngressUnit.scala:24:14] input io_out_credit_available_0_19, // @[IngressUnit.scala:24:14] input io_out_credit_available_0_20, // @[IngressUnit.scala:24:14] input io_out_credit_available_0_21, // @[IngressUnit.scala:24:14] input io_salloc_req_0_ready, // @[IngressUnit.scala:24:14] output io_salloc_req_0_valid, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_2_0, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_1_0, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_0, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_1, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_2, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_3, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_4, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_5, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_6, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_7, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_8, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_9, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_10, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_11, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_12, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_13, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_14, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_15, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_16, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_17, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_18, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_19, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_20, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_21, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_tail, // @[IngressUnit.scala:24:14] output io_out_0_valid, // @[IngressUnit.scala:24:14] output io_out_0_bits_flit_head, // @[IngressUnit.scala:24:14] output io_out_0_bits_flit_tail, // @[IngressUnit.scala:24:14] output [72:0] io_out_0_bits_flit_payload, // @[IngressUnit.scala:24:14] output [3:0] io_out_0_bits_flit_flow_vnet_id, // @[IngressUnit.scala:24:14] output [5:0] io_out_0_bits_flit_flow_ingress_node, // @[IngressUnit.scala:24:14] output [2:0] io_out_0_bits_flit_flow_ingress_node_id, // @[IngressUnit.scala:24:14] output [5:0] io_out_0_bits_flit_flow_egress_node, // @[IngressUnit.scala:24:14] output [2:0] io_out_0_bits_flit_flow_egress_node_id, // @[IngressUnit.scala:24:14] output [4:0] io_out_0_bits_out_virt_channel, // @[IngressUnit.scala:24:14] output io_in_ready, // @[IngressUnit.scala:24:14] input io_in_valid, // @[IngressUnit.scala:24:14] input io_in_bits_head, // @[IngressUnit.scala:24:14] input io_in_bits_tail, // @[IngressUnit.scala:24:14] input [72:0] io_in_bits_payload, // @[IngressUnit.scala:24:14] input [5:0] io_in_bits_egress_id // @[IngressUnit.scala:24:14] ); wire _GEN; // 
@[Decoupled.scala:51:35] wire _vcalloc_q_io_enq_ready; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_valid; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_2_0; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_1_0; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_0; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_1; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_2; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_3; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_4; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_5; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_6; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_7; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_8; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_9; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_10; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_11; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_12; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_13; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_14; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_15; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_16; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_17; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_18; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_19; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_20; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_21; // @[IngressUnit.scala:76:25] wire _vcalloc_buffer_io_enq_ready; // @[IngressUnit.scala:75:30] wire _vcalloc_buffer_io_deq_valid; // @[IngressUnit.scala:75:30] wire _vcalloc_buffer_io_deq_bits_head; // @[IngressUnit.scala:75:30] wire _vcalloc_buffer_io_deq_bits_tail; // @[IngressUnit.scala:75:30] wire [72:0] _vcalloc_buffer_io_deq_bits_payload; // @[IngressUnit.scala:75:30] wire [3:0] _vcalloc_buffer_io_deq_bits_flow_vnet_id; // @[IngressUnit.scala:75:30] wire [5:0] _vcalloc_buffer_io_deq_bits_flow_ingress_node; // @[IngressUnit.scala:75:30] wire [2:0] _vcalloc_buffer_io_deq_bits_flow_ingress_node_id; // @[IngressUnit.scala:75:30] wire [5:0] _vcalloc_buffer_io_deq_bits_flow_egress_node; // @[IngressUnit.scala:75:30] wire [2:0] _vcalloc_buffer_io_deq_bits_flow_egress_node_id; // @[IngressUnit.scala:75:30] wire _route_q_io_enq_ready; // @[IngressUnit.scala:27:23] wire _route_q_io_deq_valid; // @[IngressUnit.scala:27:23] wire _route_buffer_io_enq_ready; // @[IngressUnit.scala:26:28] wire _route_buffer_io_deq_valid; // @[IngressUnit.scala:26:28] wire _route_buffer_io_deq_bits_head; // @[IngressUnit.scala:26:28] wire _route_buffer_io_deq_bits_tail; // @[IngressUnit.scala:26:28] wire [72:0] _route_buffer_io_deq_bits_payload; // @[IngressUnit.scala:26:28] wire [3:0] _route_buffer_io_deq_bits_flow_vnet_id; // @[IngressUnit.scala:26:28] wire [5:0] _route_buffer_io_deq_bits_flow_ingress_node; // @[IngressUnit.scala:26:28] wire [2:0] _route_buffer_io_deq_bits_flow_ingress_node_id; // @[IngressUnit.scala:26:28] wire [5:0] _route_buffer_io_deq_bits_flow_egress_node; // @[IngressUnit.scala:26:28] wire [2:0] _route_buffer_io_deq_bits_flow_egress_node_id; // @[IngressUnit.scala:26:28] wire [4:0] 
_route_buffer_io_deq_bits_virt_channel_id; // @[IngressUnit.scala:26:28] wire _route_buffer_io_enq_bits_flow_egress_node_id_T = io_in_bits_egress_id == 6'h27; // @[IngressUnit.scala:30:72] wire _route_buffer_io_enq_bits_flow_egress_node_id_T_1 = io_in_bits_egress_id == 6'h2A; // @[IngressUnit.scala:30:72] wire _route_buffer_io_enq_bits_flow_egress_node_id_T_2 = io_in_bits_egress_id == 6'h2D; // @[IngressUnit.scala:30:72] wire _route_buffer_io_enq_bits_flow_egress_node_id_T_3 = io_in_bits_egress_id == 6'h30; // @[IngressUnit.scala:30:72] wire [2:0] _GEN_0 = {1'h0, _route_buffer_io_enq_bits_flow_egress_node_id_T_1, 1'h0} | (_route_buffer_io_enq_bits_flow_egress_node_id_T_3 ? 3'h6 : 3'h0); // @[Mux.scala:30:73] wire [2:0] _route_buffer_io_enq_bits_flow_egress_node_id_T_8 = {1'h0, {2{_route_buffer_io_enq_bits_flow_egress_node_id_T}}} | (_route_buffer_io_enq_bits_flow_egress_node_id_T_1 ? 3'h5 : 3'h0); // @[Mux.scala:30:73] wire [2:0] _GEN_1 = {_route_buffer_io_enq_bits_flow_egress_node_id_T_2, _GEN_0[2:1]}; // @[Mux.scala:30:73] assign _GEN = _route_buffer_io_enq_ready & io_in_valid & io_in_bits_head & _GEN_1 == 3'h5; // @[Decoupled.scala:51:35] wire route_q_io_enq_valid = _GEN | io_in_valid & _route_buffer_io_enq_ready & io_in_bits_head & _GEN_1 != 3'h5; // @[Decoupled.scala:51:35] wire io_vcalloc_req_valid_0 = _route_buffer_io_deq_valid & _route_q_io_deq_valid & _route_buffer_io_deq_bits_head & _vcalloc_buffer_io_enq_ready & _vcalloc_q_io_enq_ready; // @[IngressUnit.scala:26:28, :27:23, :75:30, :76:25, :91:{54,78}, :92:{10,41}] wire route_buffer_io_deq_ready = _vcalloc_buffer_io_enq_ready & (_route_q_io_deq_valid | ~_route_buffer_io_deq_bits_head) & (io_vcalloc_req_ready | ~_route_buffer_io_deq_bits_head) & (_vcalloc_q_io_enq_ready | ~_route_buffer_io_deq_bits_head); // @[IngressUnit.scala:26:28, :27:23, :75:30, :76:25, :88:30, :93:61, :94:{27,37}, :95:{27,37}, :96:29] wire vcalloc_q_io_enq_valid = io_vcalloc_req_ready & io_vcalloc_req_valid_0; // @[Decoupled.scala:51:35]
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + " is already pending (not received until current cycle) for a prior request message" + " with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + " same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + " same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + " message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + " request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } // This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
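// Illustrative note, not part of the original source: each in-flight source ID owns an
// a_opcode_bus_size-bit slot of inflight_opcodes and an a_size_bus_size-bit slot of inflight_sizes.
// For example, with a_opcode_bus_size = 4 (so log_a_opcode_bus_size = 2), a Get (opcode 4) is
// recorded as (4 << 1) | 1 = 0b1001 at bit offset (source << 2); the low bit marks the slot as
// occupied and a_opcode_lookup recovers the opcode by shifting the slot right by one.
// The d_opcodes_clr / d_sizes_clr values computed above are all-ones masks over the same slots,
// so the register updates below free a slot when the matching response is accepted.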
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Bundles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import freechips.rocketchip.util._ import scala.collection.immutable.ListMap import chisel3.util.Decoupled import chisel3.util.DecoupledIO import chisel3.reflect.DataMirror abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle // common combos in lazy policy: // Put + Acquire // Release + AccessAck object TLMessages { // A B C D E def PutFullData = 0.U // . . => AccessAck def PutPartialData = 1.U // . . => AccessAck def ArithmeticData = 2.U // . . => AccessAckData def LogicalData = 3.U // . . => AccessAckData def Get = 4.U // . . => AccessAckData def Hint = 5.U // . . => HintAck def AcquireBlock = 6.U // . => Grant[Data] def AcquirePerm = 7.U // . => Grant[Data] def Probe = 6.U // . => ProbeAck[Data] def AccessAck = 0.U // . . def AccessAckData = 1.U // . . def HintAck = 2.U // . . def ProbeAck = 4.U // . def ProbeAckData = 5.U // . def Release = 6.U // . => ReleaseAck def ReleaseData = 7.U // . => ReleaseAck def Grant = 4.U // . => GrantAck def GrantData = 5.U // . => GrantAck def ReleaseAck = 6.U // . def GrantAck = 0.U // . def isA(x: UInt) = x <= AcquirePerm def isB(x: UInt) = x <= Probe def isC(x: UInt) = x <= ReleaseData def isD(x: UInt) = x <= ReleaseAck def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant) def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck) def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("AcquireBlock",TLPermissions.PermMsgGrow), ("AcquirePerm",TLPermissions.PermMsgGrow)) def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("Probe",TLPermissions.PermMsgCap)) def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("ProbeAck",TLPermissions.PermMsgReport), ("ProbeAckData",TLPermissions.PermMsgReport), ("Release",TLPermissions.PermMsgReport), ("ReleaseData",TLPermissions.PermMsgReport)) def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("Grant",TLPermissions.PermMsgCap), ("GrantData",TLPermissions.PermMsgCap), ("ReleaseAck",TLPermissions.PermMsgReserved)) } /** * The three primary TileLink permissions are: * (T)runk: the agent is (or is on inwards path to) the global point of 
serialization. * (B)ranch: the agent is on an outwards path to * (N)one: * These permissions are permuted by transfer operations in various ways. * Operations can cap permissions, request for them to be grown or shrunk, * or for a report on their current status. */ object TLPermissions { val aWidth = 2 val bdWidth = 2 val cWidth = 3 // Cap types (Grant = new permissions, Probe = permissions <= target) def toT = 0.U(bdWidth.W) def toB = 1.U(bdWidth.W) def toN = 2.U(bdWidth.W) def isCap(x: UInt) = x <= toN // Grow types (Acquire = permissions >= target) def NtoB = 0.U(aWidth.W) def NtoT = 1.U(aWidth.W) def BtoT = 2.U(aWidth.W) def isGrow(x: UInt) = x <= BtoT // Shrink types (ProbeAck, Release) def TtoB = 0.U(cWidth.W) def TtoN = 1.U(cWidth.W) def BtoN = 2.U(cWidth.W) def isShrink(x: UInt) = x <= BtoN // Report types (ProbeAck, Release) def TtoT = 3.U(cWidth.W) def BtoB = 4.U(cWidth.W) def NtoN = 5.U(cWidth.W) def isReport(x: UInt) = x <= NtoN def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT") def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN") def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN") def PermMsgReserved:Seq[String] = Seq("Reserved") } object TLAtomics { val width = 3 // Arithmetic types def MIN = 0.U(width.W) def MAX = 1.U(width.W) def MINU = 2.U(width.W) def MAXU = 3.U(width.W) def ADD = 4.U(width.W) def isArithmetic(x: UInt) = x <= ADD // Logical types def XOR = 0.U(width.W) def OR = 1.U(width.W) def AND = 2.U(width.W) def SWAP = 3.U(width.W) def isLogical(x: UInt) = x <= SWAP def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD") def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP") } object TLHints { val width = 1 def PREFETCH_READ = 0.U(width.W) def PREFETCH_WRITE = 1.U(width.W) def isHints(x: UInt) = x <= PREFETCH_WRITE def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite") } sealed trait TLChannel extends TLBundleBase { val channelName: String } sealed trait TLDataChannel extends TLChannel sealed trait TLAddrChannel extends TLDataChannel final class TLBundleA(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleA_${params.shortName}" val channelName = "'A' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleB(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleB_${params.shortName}" val channelName = "'B' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val address = UInt(params.addressBits.W) // from // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleC(params: TLBundleParameters) extends TLBundleBase(params) with 
TLAddrChannel { override def typeName = s"TLBundleC_${params.shortName}" val channelName = "'C' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.cWidth.W) // shrink or report perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleD(params: TLBundleParameters) extends TLBundleBase(params) with TLDataChannel { override def typeName = s"TLBundleD_${params.shortName}" val channelName = "'D' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val sink = UInt(params.sinkBits.W) // from val denied = Bool() // implies corrupt iff *Data val user = BundleMap(params.responseFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleE(params: TLBundleParameters) extends TLBundleBase(params) with TLChannel { override def typeName = s"TLBundleE_${params.shortName}" val channelName = "'E' channel" val sink = UInt(params.sinkBits.W) // to } class TLBundle(val params: TLBundleParameters) extends Record { // Emulate a Bundle with elements abcde or ad depending on params.hasBCE private val optA = Some (Decoupled(new TLBundleA(params))) private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params)))) private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params))) private val optD = Some (Flipped(Decoupled(new TLBundleD(params)))) private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params))) def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params))))) def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params))))) def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params))))) def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params))))) def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params))))) val elements = if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a) else ListMap("d" -> d, "a" -> a) def tieoff(): Unit = { DataMirror.specifiedDirectionOf(a.ready) match { case SpecifiedDirection.Input => a.ready := false.B c.ready := false.B e.ready := false.B b.valid := false.B d.valid := false.B case SpecifiedDirection.Output => a.valid := false.B c.valid := false.B e.valid := false.B b.ready := false.B d.ready := false.B case _ => } } } object TLBundle { def apply(params: TLBundleParameters) = new TLBundle(params) } class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params) { val a = new AsyncBundle(new TLBundleA(params.base), params.async) val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async)) val c = new AsyncBundle(new TLBundleC(params.base), params.async) val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async)) val e = new AsyncBundle(new 
TLBundleE(params.base), params.async) }

class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params) {
  val a = RationalIO(new TLBundleA(params))
  val b = Flipped(RationalIO(new TLBundleB(params)))
  val c = RationalIO(new TLBundleC(params))
  val d = Flipped(RationalIO(new TLBundleD(params)))
  val e = RationalIO(new TLBundleE(params))
}

class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params) {
  val a = CreditedIO(new TLBundleA(params))
  val b = Flipped(CreditedIO(new TLBundleB(params)))
  val c = CreditedIO(new TLBundleC(params))
  val d = Flipped(CreditedIO(new TLBundleD(params)))
  val e = CreditedIO(new TLBundleE(params))
}

File Parameters.scala:
// See LICENSE.SiFive for license details.

package freechips.rocketchip.diplomacy

import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}

import freechips.rocketchip.util.ShiftQueue

/** Options for describing the attributes of memory regions */
object RegionType {
  // Define the 'more relaxed than' ordering
  val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
  sealed trait T extends Ordered[T] {
    def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
  }

  case object CACHED      extends T // an intermediate agent may have cached a copy of the region for you
  case object TRACKED     extends T // the region may have been cached by another master, but coherence is being provided
  case object UNCACHED    extends T // the region has not been cached yet, but should be cached when possible
  case object IDEMPOTENT  extends T // gets return most recently put content, but content should not be cached
  case object VOLATILE    extends T // content may change without a put, but puts and gets have no side effects
  case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
  case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}

// A non-empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
  require (start >= 0, s"Ids cannot be negative, but got: $start.")
  require (start <= end, "Id ranges cannot be negative.")

  def compare(x: IdRange) = {
    val primary   = (this.start - x.start).signum
    val secondary = (x.end - this.end).signum
    if (primary != 0) primary else secondary
  }

  def overlaps(x: IdRange) = start < x.end && x.start < end
  def contains(x: IdRange) = start <= x.start && x.end <= end

  def contains(x: Int)  = start <= x && x < end
  def contains(x: UInt) =
    if (size == 0) {
      false.B
    } else if (size == 1) { // simple comparison
      x === start.U
    } else {
      // find index of largest different bit
      val largestDeltaBit = log2Floor(start ^ (end-1))
      val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
      val uncommonMask = (1 << smallestCommonBit) - 1
      val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
      // the prefix must match exactly (note: may shift ALL bits away)
      (x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
      // firrtl constant prop range analysis can eliminate these two:
      (start & uncommonMask).U <= uncommonBits &&
      uncommonBits <= ((end-1) & uncommonMask).U
    }

  def shift(x: Int) = IdRange(start+x, end+x)
  def size = end - start
  def isEmpty = end == start

  def range = start until end
}

object IdRange
{
  def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
    val ranges = s.sorted
    (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
  }
}

// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
  def this(x: Int) = this(x, x)

  require (min <= max, s"Min transfer $min > max transfer $max")
  require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
  require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
  require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
  require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")

  def none = min == 0
  def contains(x: Int) = isPow2(x) && min <= x && x <= max
  def containsLg(x: Int) = contains(1 << x)
  def containsLg(x: UInt) =
    if (none) false.B
    else if (min == max) { log2Ceil(min).U === x }
    else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }

  def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)

  def intersect(x: TransferSizes) =
    if (x.max < min || max < x.min) TransferSizes.none
    else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))

  // Not a union, because the result may contain sizes contained by neither term
  // NOT TO BE CONFUSED WITH COVERPOINTS
  def mincover(x: TransferSizes) = {
    if (none) {
      x
    } else if (x.none) {
      this
    } else {
      TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
    }
  }

  override def toString() = "TransferSizes[%d, %d]".format(min, max)
}

object TransferSizes {
  def apply(x: Int) = new TransferSizes(x)
  val none = new TransferSizes(0)

  def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
  def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)

  implicit def asBool(x: TransferSizes) = !x.none
}

// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
        val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
        // Do there exist A messages without Data?
        val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
        // Statically optimize the case where hasData is a constant
        if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
      }
      case _:TLBundleB => {
        // Do there exist B messages with Data?
        val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
        // Do there exist B messages without Data?
        val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
        // Statically optimize the case where hasData is a constant
        if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
      }
      case _:TLBundleC => {
        // Do there exist C messages with Data?
        val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
        // Do there exist C messages without Data?
        val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
        if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
      }
      case _:TLBundleD => {
        // Do there exist D messages with Data?
        val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
        // Do there exist D messages without Data?
        val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
        if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
      }
      case _:TLBundleE => Some(false)
    }
  }

  def isRequest(x: TLChannel): Bool = {
    x match {
      case a: TLBundleA => true.B
      case b: TLBundleB => true.B
      case c: TLBundleC => c.opcode(2) && c.opcode(1)
        // opcode === TLMessages.Release ||
        // opcode === TLMessages.ReleaseData
      case d: TLBundleD => d.opcode(2) && !d.opcode(1)
        // opcode === TLMessages.Grant ||
        // opcode === TLMessages.GrantData
      case e: TLBundleE => false.B
    }
  }

  def isResponse(x: TLChannel): Bool = {
    x match {
      case a: TLBundleA => false.B
      case b: TLBundleB => false.B
      case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
        // opcode =/= TLMessages.Release &&
        // opcode =/= TLMessages.ReleaseData
      case d: TLBundleD => true.B // Grant isResponse + isRequest
      case e: TLBundleE => true.B
    }
  }

  def hasData(x: TLChannel): Bool = {
    val opdata = x match {
      case a: TLBundleA => !a.opcode(2)
        // opcode === TLMessages.PutFullData    ||
        // opcode === TLMessages.PutPartialData ||
        // opcode === TLMessages.ArithmeticData ||
        // opcode === TLMessages.LogicalData
      case b: TLBundleB => !b.opcode(2)
        // opcode === TLMessages.PutFullData    ||
        // opcode === TLMessages.PutPartialData ||
        // opcode === TLMessages.ArithmeticData ||
        // opcode === TLMessages.LogicalData
      case c: TLBundleC => c.opcode(0)
        // opcode === TLMessages.AccessAckData ||
        // opcode === TLMessages.ProbeAckData  ||
        // opcode === TLMessages.ReleaseData
      case d: TLBundleD => d.opcode(0)
        // opcode === TLMessages.AccessAckData ||
        // opcode === TLMessages.GrantData
      case e: TLBundleE => false.B
    }
    staticHasData(x).map(_.B).getOrElse(opdata)
  }

  def opcode(x: TLDataChannel): UInt = {
    x match {
      case a: TLBundleA => a.opcode
      case b: TLBundleB => b.opcode
      case c: TLBundleC => c.opcode
      case d: TLBundleD => d.opcode
    }
  }

  def param(x: TLDataChannel): UInt = {
    x match {
      case a: TLBundleA => a.param
      case b: TLBundleB => b.param
      case c: TLBundleC => c.param
      case d: TLBundleD =>
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := 
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
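The diplomacy helper classes in Parameters.scala above (IdRange, TransferSizes, AddressSet) are ordinary Scala and can be exercised without elaborating any hardware. The sketch below is illustrative only; it is not part of the file listings above or of the generated Verilog below. It assumes rocket-chip is on the classpath, and the object name AddressMathDemo is made up for this example.

import freechips.rocketchip.diplomacy.{AddressSet, IdRange, TransferSizes}

object AddressMathDemo extends App {
  // A contiguous 4 KiB region: base=0x1000, mask=0xfff covers 0x1000-0x1fff.
  val ram = AddressSet(0x1000, 0xfff)
  assert(ram.contains(BigInt(0x10a4)) && !ram.contains(BigInt(0x2000)))
  assert(ram.contiguous && ram.alignment == BigInt(0x1000) && ram.max == BigInt(0x1fff))

  // A discontiguous set: base=0x1000, mask=0xf0f matches 0x1000-0x100f, 0x1100-0x110f, ...
  val strided = AddressSet(0x1000, 0xf0f)
  assert(strided.contains(BigInt(0x1305)) && !strided.contains(BigInt(0x1310)))
  assert(!strided.contiguous && strided.alignment == BigInt(0x10))

  // Carving a hole: removing the low 256 bytes of a 4 KiB region at 0x0 leaves
  // aligned power-of-two pieces that together cover 0x100-0xfff.
  val pieces = AddressSet(0x0, 0xfff).subtract(AddressSet(0x0, 0xff))
  assert(pieces.toSet == Set(AddressSet(0x100, 0xff), AddressSet(0x200, 0x1ff),
                             AddressSet(0x400, 0x3ff), AddressSet(0x800, 0x7ff)))

  // unify merges sets that differ only in a single mask bit.
  assert(AddressSet.unify(Seq(AddressSet(0x0, 0xff), AddressSet(0x100, 0xff))) ==
         Seq(AddressSet(0x0, 0x1ff)))

  // TransferSizes is an inclusive range of power-of-two sizes, in bytes.
  val xfer = TransferSizes(4, 64)
  assert(xfer.contains(16) && !xfer.contains(24) && !xfer.contains(128))
  assert(xfer.intersect(TransferSizes(32, 256)) == TransferSizes(32, 64))

  // IdRange is a half-open range of source IDs.
  val ids = IdRange(8, 12)
  assert(ids.contains(11) && !ids.contains(12) && ids.overlaps(IdRange(11, 16)))
}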
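Likewise, the beat bookkeeping in TLEdge.firstlastHelper can be checked with a small pure-Scala model. This is only a behavioral sketch of the counter update (the real logic is a hardware register inside TLEdge); the names BeatCountModel and burst are hypothetical. For an 8-byte data bus, a 32-byte PutFullData (size = 5) gives numBeats1 = (2^5 - 1) >> 3 = 3, i.e. a 4-beat burst.

object BeatCountModel extends App {
  // Model of one burst: the counter starts at 0 ("between bursts"), is loaded with
  // beats1 on the first beat, and decrements on each subsequent beat.
  def burst(beats1: Int): Seq[(Boolean, Boolean, Int)] = {
    var counter = 0
    Seq.fill(beats1 + 1) {
      val counter1 = counter - 1
      val first = counter == 0
      val last  = counter == 1 || beats1 == 0
      val count = beats1 & ~counter1      // mirrors (beats1 & ~counter1) in firstlastHelper
      counter   = if (first) beats1 else counter1
      (first, last, count)
    }
  }

  // A 4-beat burst: first only on beat 0, last only on beat 3, count runs 0,1,2,3.
  assert(burst(3) == Seq((true, false, 0), (false, false, 1), (false, false, 2), (false, true, 3)))
  // A single-beat message (beats1 = 0) is both first and last.
  assert(burst(0) == Seq((true, true, 0)))
}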
module TLMonitor_3( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [6:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [28:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [6:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input io_in_d_bits_sink, // @[Monitor.scala:20:14] input io_in_d_bits_denied, // @[Monitor.scala:20:14] input [63:0] io_in_d_bits_data, // @[Monitor.scala:20:14] input io_in_d_bits_corrupt // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7] wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7] wire [6:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7] wire [28:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7] wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7] wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7] wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7] wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7] wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7] wire [6:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7] wire io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7] wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7] wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7] wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7] wire sink_ok = 1'h0; // @[Monitor.scala:309:31] wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire 
_c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35] wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36] wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25] wire c_first_done = 1'h0; // @[Edges.scala:233:22] wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47] wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_3_ready = 
1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95] wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71] wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44] wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36] wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51] wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40] wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55] wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88] wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59] wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14] wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27] wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25] wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21] wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] 
_c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // 
@[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61] wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_39 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_41 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_45 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_47 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_51 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_53 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_57 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_59 = 1'h1; // @[Parameters.scala:57:20] wire c_first = 1'h1; // @[Edges.scala:231:25] wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire c_first_last = 1'h1; // @[Edges.scala:232:33] wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28] wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28] wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] 
_c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [28:0] _c_first_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_first_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_first_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_first_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_set_wo_ready_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_set_wo_ready_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_opcodes_set_interm_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_opcodes_set_interm_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_sizes_set_interm_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_sizes_set_interm_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_opcodes_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_opcodes_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_sizes_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_sizes_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_probe_ack_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_probe_ack_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_probe_ack_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_probe_ack_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _same_cycle_resp_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _same_cycle_resp_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _same_cycle_resp_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _same_cycle_resp_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _same_cycle_resp_WIRE_4_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _same_cycle_resp_WIRE_5_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [6:0] _c_first_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_first_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_first_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] 
_c_first_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_set_wo_ready_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_set_wo_ready_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_opcodes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_opcodes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_sizes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_sizes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_opcodes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_opcodes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_sizes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_sizes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_probe_ack_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_probe_ack_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_probe_ack_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_probe_ack_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _same_cycle_resp_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _same_cycle_resp_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _same_cycle_resp_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _same_cycle_resp_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _same_cycle_resp_WIRE_4_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _same_cycle_resp_WIRE_5_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [1026:0] _c_opcodes_set_T_1 = 1027'h0; // @[Monitor.scala:767:54] wire [1026:0] _c_sizes_set_T_1 = 1027'h0; // 
@[Monitor.scala:768:52] wire [9:0] _c_opcodes_set_T = 10'h0; // @[Monitor.scala:767:79] wire [9:0] _c_sizes_set_T = 10'h0; // @[Monitor.scala:768:77] wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61] wire [3:0] _c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59] wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40] wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40] wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53] wire [3:0] _c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51] wire [127:0] _c_set_wo_ready_T = 128'h1; // @[OneHot.scala:58:35] wire [127:0] _c_set_T = 128'h1; // @[OneHot.scala:58:35] wire [259:0] c_opcodes_set = 260'h0; // @[Monitor.scala:740:34] wire [259:0] c_sizes_set = 260'h0; // @[Monitor.scala:741:34] wire [64:0] c_set = 65'h0; // @[Monitor.scala:738:34] wire [64:0] c_set_wo_ready = 65'h0; // @[Monitor.scala:739:34] wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46] wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76] wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71] wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42] wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42] wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42] wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123] wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117] wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48] wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48] wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123] wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119] wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48] wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48] wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34] wire [6:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_8 = 
io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_36 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_37 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_38 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_39 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_40 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_41 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_42 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_43 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_4 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_5 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire _source_ok_T = io_in_a_bits_source_0 == 7'h10; // @[Monitor.scala:36:7] wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] _source_ok_T_1 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] 
_source_ok_T_7 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_13 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_19 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire _source_ok_T_2 = _source_ok_T_1 == 5'h0; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_8 = _source_ok_T_7 == 5'h1; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_14 = _source_ok_T_13 == 5'h2; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_20 = _source_ok_T_19 == 5'h3; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31] wire _source_ok_T_25 = io_in_a_bits_source_0 == 7'h22; // @[Monitor.scala:36:7] wire _source_ok_WIRE_5 = _source_ok_T_25; // @[Parameters.scala:1138:31] wire _source_ok_T_26 = io_in_a_bits_source_0 == 7'h21; // @[Monitor.scala:36:7] wire _source_ok_WIRE_6 = _source_ok_T_26; // @[Parameters.scala:1138:31] wire _source_ok_T_27 = io_in_a_bits_source_0 == 7'h20; // @[Monitor.scala:36:7] wire _source_ok_WIRE_7 = _source_ok_T_27; // @[Parameters.scala:1138:31] wire _source_ok_T_28 = io_in_a_bits_source_0 == 7'h40; // @[Monitor.scala:36:7] wire _source_ok_WIRE_8 = _source_ok_T_28; // @[Parameters.scala:1138:31] wire _source_ok_T_29 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_30 = _source_ok_T_29 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_31 = _source_ok_T_30 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_32 = _source_ok_T_31 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_33 = _source_ok_T_32 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_34 = _source_ok_T_33 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_35 = _source_ok_T_34 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46] wire source_ok = _source_ok_T_35 | _source_ok_WIRE_8; // @[Parameters.scala:1138:31, :1139:46] wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71] wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71] assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71] wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71] assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71] wire [12:0] _a_first_beats1_decode_T_3; // 
@[package.scala:243:71] assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71] wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}] wire [28:0] _is_aligned_T = {23'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46] wire is_aligned = _is_aligned_T == 29'h0; // @[Edges.scala:21:{16,24}] wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49] wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}] wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27] wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21] wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26] wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26] wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}] wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}] wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26] wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26] wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}] wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26] wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26] wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20] wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}] wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38] 
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}] wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}] wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}] wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}] wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10] wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10] wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10] wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_4 = _uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_5 = _uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_6 = _uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_7 = _uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_9 = _uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_10 = _uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_11 = _uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_12 = _uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_13 = _uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_14 = _uncommonBits_T_14[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_16 = _uncommonBits_T_16[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_17 = 
_uncommonBits_T_17[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_18 = _uncommonBits_T_18[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_19 = _uncommonBits_T_19[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_24 = _uncommonBits_T_24[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_25 = _uncommonBits_T_25[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_28 = _uncommonBits_T_28[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_29 = _uncommonBits_T_29[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_30 = _uncommonBits_T_30[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_31 = _uncommonBits_T_31[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_34 = _uncommonBits_T_34[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_36 = _uncommonBits_T_36[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_37 = _uncommonBits_T_37[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_38 = _uncommonBits_T_38[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_39 = _uncommonBits_T_39[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_40 = _uncommonBits_T_40[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_41 = _uncommonBits_T_41[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_42 = _uncommonBits_T_42[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_43 = _uncommonBits_T_43[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_36 = io_in_d_bits_source_0 == 7'h10; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_0 = _source_ok_T_36; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] _source_ok_T_37 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_43 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_49 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_55 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire _source_ok_T_38 = _source_ok_T_37 == 5'h0; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_40 = _source_ok_T_38; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_42 = _source_ok_T_40; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_1 = _source_ok_T_42; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_44 = _source_ok_T_43 == 5'h1; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_46 = _source_ok_T_44; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_48 = _source_ok_T_46; // 
@[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_2 = _source_ok_T_48; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_50 = _source_ok_T_49 == 5'h2; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_52 = _source_ok_T_50; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_54 = _source_ok_T_52; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_3 = _source_ok_T_54; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_56 = _source_ok_T_55 == 5'h3; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_58 = _source_ok_T_56; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_60 = _source_ok_T_58; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_4 = _source_ok_T_60; // @[Parameters.scala:1138:31] wire _source_ok_T_61 = io_in_d_bits_source_0 == 7'h22; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_5 = _source_ok_T_61; // @[Parameters.scala:1138:31] wire _source_ok_T_62 = io_in_d_bits_source_0 == 7'h21; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_6 = _source_ok_T_62; // @[Parameters.scala:1138:31] wire _source_ok_T_63 = io_in_d_bits_source_0 == 7'h20; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_7 = _source_ok_T_63; // @[Parameters.scala:1138:31] wire _source_ok_T_64 = io_in_d_bits_source_0 == 7'h40; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_8 = _source_ok_T_64; // @[Parameters.scala:1138:31] wire _source_ok_T_65 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_66 = _source_ok_T_65 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_67 = _source_ok_T_66 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_68 = _source_ok_T_67 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_69 = _source_ok_T_68 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_70 = _source_ok_T_69 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_71 = _source_ok_T_70 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46] wire source_ok_1 = _source_ok_T_71 | _source_ok_WIRE_1_8; // @[Parameters.scala:1138:31, :1139:46] wire _T_1161 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35] wire _a_first_T; // @[Decoupled.scala:51:35] assign _a_first_T = _T_1161; // @[Decoupled.scala:51:35] wire _a_first_T_1; // @[Decoupled.scala:51:35] assign _a_first_T_1 = _T_1161; // @[Decoupled.scala:51:35] wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46] wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}] wire [2:0] a_first_beats1 = a_first_beats1_opdata ? 
a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [2:0] a_first_counter; // @[Edges.scala:229:27] wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28] wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35] wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27] wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [2:0] size; // @[Monitor.scala:389:22] reg [6:0] source; // @[Monitor.scala:390:22] reg [28:0] address; // @[Monitor.scala:391:22] wire _T_1234 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35] wire _d_first_T; // @[Decoupled.scala:51:35] assign _d_first_T = _T_1234; // @[Decoupled.scala:51:35] wire _d_first_T_1; // @[Decoupled.scala:51:35] assign _d_first_T_1 = _T_1234; // @[Decoupled.scala:51:35] wire _d_first_T_2; // @[Decoupled.scala:51:35] assign _d_first_T_2 = _T_1234; // @[Decoupled.scala:51:35] wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71] assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71] assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71] assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71] wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46] wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire [2:0] d_first_beats1 = d_first_beats1_opdata ? 
d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28] wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] param_1; // @[Monitor.scala:539:22] reg [2:0] size_1; // @[Monitor.scala:540:22] reg [6:0] source_1; // @[Monitor.scala:541:22] reg sink; // @[Monitor.scala:542:22] reg denied; // @[Monitor.scala:543:22] reg [64:0] inflight; // @[Monitor.scala:614:27] reg [259:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [259:0] inflight_sizes; // @[Monitor.scala:618:33] wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46] wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}] wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [2:0] a_first_counter_1; // @[Edges.scala:229:27] wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28] wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46] wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? 
d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter_1; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28] wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [64:0] a_set; // @[Monitor.scala:626:34] wire [64:0] a_set_wo_ready; // @[Monitor.scala:627:34] wire [259:0] a_opcodes_set; // @[Monitor.scala:630:33] wire [259:0] a_sizes_set; // @[Monitor.scala:632:31] wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35] wire [9:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69] wire [9:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69] assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69] wire [9:0] _a_size_lookup_T; // @[Monitor.scala:641:65] assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65] wire [9:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101] assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101] wire [9:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99] assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99] wire [9:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69] assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69] wire [9:0] _c_size_lookup_T; // @[Monitor.scala:750:67] assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67] wire [9:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101] assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101] wire [9:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99] assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99] wire [259:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}] wire [259:0] _a_opcode_lookup_T_6 = {256'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}] wire [259:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:637:{97,152}] assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}] wire [3:0] a_size_lookup; // @[Monitor.scala:639:33] wire [259:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}] wire [259:0] _a_size_lookup_T_6 = {256'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}] wire [259:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[259:1]}; // @[Monitor.scala:641:{91,144}] assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}] wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40] wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38] wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44] wire 
[127:0] _GEN_2 = 128'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35] wire [127:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35] assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35] wire [127:0] _a_set_T; // @[OneHot.scala:58:35] assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35] assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35] wire _T_1087 = _T_1161 & a_first_1; // @[Decoupled.scala:51:35] assign a_set = _T_1087 ? _a_set_T[64:0] : 65'h0; // @[OneHot.scala:58:35] wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53] wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}] assign a_opcodes_set_interm = _T_1087 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}] wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51] wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}] assign a_sizes_set_interm = _T_1087 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}] wire [9:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79] wire [9:0] _a_opcodes_set_T; // @[Monitor.scala:659:79] assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79] wire [9:0] _a_sizes_set_T; // @[Monitor.scala:660:77] assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77] wire [1026:0] _a_opcodes_set_T_1 = {1023'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}] assign a_opcodes_set = _T_1087 ? _a_opcodes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}] wire [1026:0] _a_sizes_set_T_1 = {1023'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}] assign a_sizes_set = _T_1087 ? _a_sizes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}] wire [64:0] d_clr; // @[Monitor.scala:664:34] wire [64:0] d_clr_wo_ready; // @[Monitor.scala:665:34] wire [259:0] d_opcodes_clr; // @[Monitor.scala:668:33] wire [259:0] d_sizes_clr; // @[Monitor.scala:670:31] wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46] wire d_release_ack; // @[Monitor.scala:673:46] assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46] wire d_release_ack_1; // @[Monitor.scala:783:46] assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46] wire _T_1133 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26] wire [127:0] _GEN_5 = 128'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35] wire [127:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35] wire [127:0] _d_clr_T; // @[OneHot.scala:58:35] assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35] wire [127:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35] wire [127:0] _d_clr_T_1; // @[OneHot.scala:58:35] assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35] assign d_clr_wo_ready = _T_1133 & ~d_release_ack ? _d_clr_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35] wire _T_1102 = _T_1234 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35] assign d_clr = _T_1102 ? _d_clr_T[64:0] : 65'h0; // @[OneHot.scala:58:35] wire [1038:0] _d_opcodes_clr_T_5 = 1039'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}] assign d_opcodes_clr = _T_1102 ? 
_d_opcodes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}] wire [1038:0] _d_sizes_clr_T_5 = 1039'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}] assign d_sizes_clr = _T_1102 ? _d_sizes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}] wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}] wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113] wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}] wire [64:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27] wire [64:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38] wire [64:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}] wire [259:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43] wire [259:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62] wire [259:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}] wire [259:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39] wire [259:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56] wire [259:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}] reg [31:0] watchdog; // @[Monitor.scala:709:27] wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26] wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26] reg [64:0] inflight_1; // @[Monitor.scala:726:35] wire [64:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35] reg [259:0] inflight_opcodes_1; // @[Monitor.scala:727:35] wire [259:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43] reg [259:0] inflight_sizes_1; // @[Monitor.scala:728:35] wire [259:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41] wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46] wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter_2; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28] wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}] wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T_2 = d_first_2 ? 
d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35] wire [3:0] c_size_lookup; // @[Monitor.scala:748:35] wire [259:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}] wire [259:0] _c_opcode_lookup_T_6 = {256'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}] wire [259:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:749:{97,152}] assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}] wire [259:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}] wire [259:0] _c_size_lookup_T_6 = {256'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}] wire [259:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[259:1]}; // @[Monitor.scala:750:{93,146}] assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}] wire [64:0] d_clr_1; // @[Monitor.scala:774:34] wire [64:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34] wire [259:0] d_opcodes_clr_1; // @[Monitor.scala:776:34] wire [259:0] d_sizes_clr_1; // @[Monitor.scala:777:34] wire _T_1205 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26] assign d_clr_wo_ready_1 = _T_1205 & d_release_ack_1 ? _d_clr_wo_ready_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35] wire _T_1187 = _T_1234 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35] assign d_clr_1 = _T_1187 ? _d_clr_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35] wire [1038:0] _d_opcodes_clr_T_11 = 1039'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}] assign d_opcodes_clr_1 = _T_1187 ? _d_opcodes_clr_T_11[259:0] : 260'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}] wire [1038:0] _d_sizes_clr_T_11 = 1039'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}] assign d_sizes_clr_1 = _T_1187 ? _d_sizes_clr_T_11[259:0] : 260'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}] wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 7'h0; // @[Monitor.scala:36:7, :795:113] wire [64:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46] wire [64:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}] wire [259:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62] wire [259:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}] wire [259:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58] wire [259:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
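Before the next example, a short aside on how such Verilog is produced. The sketch below is illustrative and not part of the original files: it assumes a Chisel 3-style ChiselStage and the gemmini sources (the PE, Dataflow and Arithmetic definitions listed in the next example) are available on the classpath; the object name EmitPE and the concrete width parameters are made-up choices.

// Illustrative elaboration driver (assumptions: Chisel 3 ChiselStage + gemmini on the classpath).
import chisel3._
import chisel3.stage.ChiselStage
import gemmini._

object EmitPE extends App {
  // emitVerilog elaborates the module, runs the FIRRTL transforms and returns the Verilog text.
  // 8-bit inputs, 20-bit outputs, a 32-bit accumulator and 16 in-flight matmuls are assumed values.
  val verilog = (new ChiselStage).emitVerilog(
    new PE(SInt(8.W), SInt(20.W), SInt(32.W), Dataflow.BOTH, max_simultaneous_matmuls = 16))
  println(verilog)
}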
Generate the Verilog code corresponding to the following Chisel files. File PE.scala: // See README.md for license details. package gemmini import chisel3._ import chisel3.util._ class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle { val dataflow = UInt(1.W) // TODO make this an Enum val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)? val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats } class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module { import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(inputType) val in_c = Input(cType) val out_d = Output(dType) }) io.out_d := io.in_c.mac(io.in_a, io.in_b) } // TODO update documentation /** * A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh. * @param width Data width of operands */ class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int) (implicit ev: Arithmetic[T]) extends Module { // Debugging variables import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(outputType) val in_d = Input(outputType) val out_a = Output(inputType) val out_b = Output(outputType) val out_c = Output(outputType) val in_control = Input(new PEControl(accType)) val out_control = Output(new PEControl(accType)) val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W)) val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W)) val in_last = Input(Bool()) val out_last = Output(Bool()) val in_valid = Input(Bool()) val out_valid = Output(Bool()) val bad_dataflow = Output(Bool()) }) val cType = if (df == Dataflow.WS) inputType else accType // When creating PEs that support multiple dataflows, the // elaboration/synthesis tools often fail to consolidate and de-duplicate // MAC units. To force mac circuitry to be re-used, we create a "mac_unit" // module here which just performs a single MAC operation val mac_unit = Module(new MacUnit(inputType, if (df == Dataflow.WS) outputType else accType, outputType)) val a = io.in_a val b = io.in_b val d = io.in_d val c1 = Reg(cType) val c2 = Reg(cType) val dataflow = io.in_control.dataflow val prop = io.in_control.propagate val shift = io.in_control.shift val id = io.in_id val last = io.in_last val valid = io.in_valid io.out_a := a io.out_control.dataflow := dataflow io.out_control.propagate := prop io.out_control.shift := shift io.out_id := id io.out_last := last io.out_valid := valid mac_unit.io.in_a := a val last_s = RegEnable(prop, valid) val flip = last_s =/= prop val shift_offset = Mux(flip, shift, 0.U) // Which dataflow are we using? val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W) val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W) // Is c1 being computed on, or propagated forward (in the output-stationary dataflow)? 
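  // The two accumulators double-buffer each other. In the output-stationary dataflow, the
  // register selected by `prop` is shifted, clipped to outputType and driven out on out_c
  // while being reloaded from in_d, and the other register keeps accumulating through
  // mac_unit; toggling `propagate` swaps their roles. In the weight-stationary dataflow the
  // registers hold weights instead: the non-propagating one feeds mac_unit.io.in_b, and the
  // partial sum arriving on in_b passes through the MAC to out_b.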
val COMPUTE = 0.U(1.W) val PROPAGATE = 1.U(1.W) io.bad_dataflow := false.B when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 c2 := mac_unit.io.out_d c1 := d.withWidthOf(cType) }.otherwise { io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c1 c1 := mac_unit.io.out_d c2 := d.withWidthOf(cType) } }.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := c1 mac_unit.io.in_b := c2.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c1 := d }.otherwise { io.out_c := c2 mac_unit.io.in_b := c1.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c2 := d } }.otherwise { io.bad_dataflow := true.B //assert(false.B, "unknown dataflow") io.out_c := DontCare io.out_b := DontCare mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 } when (!valid) { c1 := c1 c2 := c2 mac_unit.io.in_b := DontCare mac_unit.io.in_c := DontCare } } File Arithmetic.scala: // A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own: // implicit MyTypeArithmetic extends Arithmetic[MyType] { ... } package gemmini import chisel3._ import chisel3.util._ import hardfloat._ // Bundles that represent the raw bits of custom datatypes case class Float(expWidth: Int, sigWidth: Int) extends Bundle { val bits = UInt((expWidth + sigWidth).W) val bias: Int = (1 << (expWidth-1)) - 1 } case class DummySInt(w: Int) extends Bundle { val bits = UInt(w.W) def dontCare: DummySInt = { val o = Wire(new DummySInt(w)) o.bits := 0.U o } } // The Arithmetic typeclass which implements various arithmetic operations on custom datatypes abstract class Arithmetic[T <: Data] { implicit def cast(t: T): ArithmeticOps[T] } abstract class ArithmeticOps[T <: Data](self: T) { def *(t: T): T def mac(m1: T, m2: T): T // Returns (m1 * m2 + self) def +(t: T): T def -(t: T): T def >>(u: UInt): T // This is a rounding shift! Rounds away from 0 def >(t: T): Bool def identity: T def withWidthOf(t: T): T def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates def relu: T def zero: T def minimum: T // Optional parameters, which only need to be defined if you want to enable various optimizations for transformers def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None def mult_with_reciprocal[U <: Data](reciprocal: U) = self } object Arithmetic { implicit object UIntArithmetic extends Arithmetic[UInt] { override implicit def cast(self: UInt) = new ArithmeticOps(self) { override def *(t: UInt) = self * t override def mac(m1: UInt, m2: UInt) = m1 * m2 + self override def +(t: UInt) = self + t override def -(t: UInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? 
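        // When u === 0.U, both point_five and zeros are forced to 0, so r = 0 below and the
        // result is simply self. Worked example: self = 23 = 0b010111, u = 3. The truncated
        // shift gives 0b010 = 2; point_five = self(2) = 1; zeros = ((self & 0b011) =/= 0) = true;
        // so r = 1 and the result is 3, i.e. 23/8 = 2.875 rounded to nearest (the rne case of
        // the vxrm scheme linked above).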
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = point_five & (zeros | ones_digit) (self >> u).asUInt + r } override def >(t: UInt): Bool = self > t override def withWidthOf(t: UInt) = self.asTypeOf(t) override def clippedToWidthOf(t: UInt) = { val sat = ((1 << (t.getWidth-1))-1).U Mux(self > sat, sat, self)(t.getWidth-1, 0) } override def relu: UInt = self override def zero: UInt = 0.U override def identity: UInt = 1.U override def minimum: UInt = 0.U } } implicit object SIntArithmetic extends Arithmetic[SInt] { override implicit def cast(self: SInt) = new ArithmeticOps(self) { override def *(t: SInt) = self * t override def mac(m1: SInt, m2: SInt) = m1 * m2 + self override def +(t: SInt) = self + t override def -(t: SInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = (point_five & (zeros | ones_digit)).asBool (self >> u).asSInt + Mux(r, 1.S, 0.S) } override def >(t: SInt): Bool = self > t override def withWidthOf(t: SInt) = { if (self.getWidth >= t.getWidth) self(t.getWidth-1, 0).asSInt else { val sign_bits = t.getWidth - self.getWidth val sign = self(self.getWidth-1) Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t) } } override def clippedToWidthOf(t: SInt): SInt = { val maxsat = ((1 << (t.getWidth-1))-1).S val minsat = (-(1 << (t.getWidth-1))).S MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt } override def relu: SInt = Mux(self >= 0.S, self, 0.S) override def zero: SInt = 0.S override def identity: SInt = 1.S override def minimum: SInt = (-(1 << (self.getWidth-1))).S override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(denom_t.cloneType)) val output = Wire(Decoupled(self.cloneType)) // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def sin_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def uin_to_float(x: UInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := x in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = 
sin_to_float(self) val denom_rec = uin_to_float(input.bits) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := self_rec divider.io.b := denom_rec divider.io.roundingMode := consts.round_minMag divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := float_to_in(divider.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(self.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) // Instantiate the hardloat sqrt val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0)) input.ready := sqrter.io.inReady sqrter.io.inValid := input.valid sqrter.io.sqrtOp := true.B sqrter.io.a := self_rec sqrter.io.b := DontCare sqrter.io.roundingMode := consts.round_minMag sqrter.io.detectTininess := consts.tininess_afterRounding output.valid := sqrter.io.outValid_sqrt output.bits := float_to_in(sqrter.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match { case Float(expWidth, sigWidth) => val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(u.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } val self_rec = in_to_float(self) val one_rec = in_to_float(1.S) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := one_rec divider.io.b := self_rec divider.io.roundingMode := consts.round_near_even divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u) assert(!output.valid || output.ready) Some((input, output)) case _ => None } override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match 
{ case recip @ Float(expWidth, sigWidth) => def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits) // Instantiate the hardloat divider val muladder = Module(new MulRecFN(expWidth, sigWidth)) muladder.io.roundingMode := consts.round_near_even muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := reciprocal_rec float_to_in(muladder.io.out) case _ => self } } } implicit object FloatArithmetic extends Arithmetic[Float] { // TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) { override def *(t: Float): Float = { val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := t_rec_resized val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def mac(m1: Float, m2: Float): Float = { // Recode all operands val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits) val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize m1 to self's width val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth)) m1_resizer.io.in := m1_rec m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m1_resizer.io.detectTininess := consts.tininess_afterRounding val m1_rec_resized = m1_resizer.io.out // Resize m2 to self's width val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth)) m2_resizer.io.in := m2_rec m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m2_resizer.io.detectTininess := consts.tininess_afterRounding val m2_rec_resized = m2_resizer.io.out // Perform multiply-add val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := m1_rec_resized muladder.io.b := m2_rec_resized muladder.io.c := self_rec // Convert result 
to standard format // TODO remove these intermediate recodings val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def +(t: Float): Float = { require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Generate 1 as a float val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := 1.U in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding val one_rec = in_to_rec_fn.io.out // Resize t val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out // Perform addition val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := t_rec_resized muladder.io.b := one_rec muladder.io.c := self_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def -(t: Float): Float = { val t_sgn = t.bits(t.getWidth-1) val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t) self + neg_t } override def >>(u: UInt): Float = { // Recode self val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Get 2^(-u) as a recoded float val shift_exp = Wire(UInt(self.expWidth.W)) shift_exp := self.bias.U - u val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W)) val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn) assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported") // Multiply self and 2^(-u) val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := shift_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def >(t: Float): Bool = { // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize t to self's width val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth)) comparator.io.a := self_rec comparator.io.b := t_rec_resized comparator.io.signaling := false.B comparator.io.gt } override def withWidthOf(t: Float): Float = { val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even 
// consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def clippedToWidthOf(t: Float): Float = { // TODO check for overflow. Right now, we just assume that overflow doesn't happen val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def relu: Float = { val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits) val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits) result } override def zero: Float = 0.U.asTypeOf(self) override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) } } implicit object DummySIntArithmetic extends Arithmetic[DummySInt] { override implicit def cast(self: DummySInt) = new ArithmeticOps(self) { override def *(t: DummySInt) = self.dontCare override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare override def +(t: DummySInt) = self.dontCare override def -(t: DummySInt) = self.dontCare override def >>(t: UInt) = self.dontCare override def >(t: DummySInt): Bool = false.B override def identity = self.dontCare override def withWidthOf(t: DummySInt) = self.dontCare override def clippedToWidthOf(t: DummySInt) = self.dontCare override def relu = self.dontCare override def zero = self.dontCare override def minimum: DummySInt = self.dontCare } } }
module MacUnit_201( // @[PE.scala:14:7] input clock, // @[PE.scala:14:7] input reset, // @[PE.scala:14:7] input [7:0] io_in_a, // @[PE.scala:16:14] input [7:0] io_in_b, // @[PE.scala:16:14] input [19:0] io_in_c, // @[PE.scala:16:14] output [19:0] io_out_d // @[PE.scala:16:14] ); wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:14:7] wire [7:0] io_in_b_0 = io_in_b; // @[PE.scala:14:7] wire [19:0] io_in_c_0 = io_in_c; // @[PE.scala:14:7] wire [19:0] _io_out_d_T_3; // @[Arithmetic.scala:93:54] wire [19:0] io_out_d_0; // @[PE.scala:14:7] wire [15:0] _io_out_d_T = {{8{io_in_a_0[7]}}, io_in_a_0} * {{8{io_in_b_0[7]}}, io_in_b_0}; // @[PE.scala:14:7] wire [20:0] _io_out_d_T_1 = {{5{_io_out_d_T[15]}}, _io_out_d_T} + {io_in_c_0[19], io_in_c_0}; // @[PE.scala:14:7] wire [19:0] _io_out_d_T_2 = _io_out_d_T_1[19:0]; // @[Arithmetic.scala:93:54] assign _io_out_d_T_3 = _io_out_d_T_2; // @[Arithmetic.scala:93:54] assign io_out_d_0 = _io_out_d_T_3; // @[PE.scala:14:7] assign io_out_d = io_out_d_0; // @[PE.scala:14:7] endmodule
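For reference, the datapath flattened into MacUnit_201 corresponds to the SIntArithmetic mac operation above followed by a truncation back to the 20-bit accumulator width. The stand-alone sketch below is added for illustration only; the class name and the 8-/20-bit widths are read off the Verilog port list above rather than taken from the original sources.

import chisel3._

// Illustrative only: a * b is a 16-bit signed product, adding the 20-bit accumulator
// widens the sum to 21 bits, and slicing [19:0] mirrors the final truncation performed
// in the generated Verilog above.
class MacUnitSketch extends Module {
  val io = IO(new Bundle {
    val in_a  = Input(SInt(8.W))
    val in_b  = Input(SInt(8.W))
    val in_c  = Input(SInt(20.W))
    val out_d = Output(SInt(20.W))
  })
  io.out_d := (io.in_a * io.in_b + io.in_c)(19, 0).asSInt
}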
Generate the Verilog code corresponding to the following Chisel files. File util.scala: //****************************************************************************** // Copyright (c) 2015 - 2019, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // Utility Functions //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ package boom.v3.util import chisel3._ import chisel3.util._ import freechips.rocketchip.rocket.Instructions._ import freechips.rocketchip.rocket._ import freechips.rocketchip.util.{Str} import org.chipsalliance.cde.config.{Parameters} import freechips.rocketchip.tile.{TileKey} import boom.v3.common.{MicroOp} import boom.v3.exu.{BrUpdateInfo} /** * Object to XOR fold a input register of fullLength into a compressedLength. */ object Fold { def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = { val clen = compressedLength val hlen = fullLength if (hlen <= clen) { input } else { var res = 0.U(clen.W) var remaining = input.asUInt for (i <- 0 to hlen-1 by clen) { val len = if (i + clen > hlen ) (hlen - i) else clen require(len > 0) res = res(clen-1,0) ^ remaining(len-1,0) remaining = remaining >> len.U } res } } } /** * Object to check if MicroOp was killed due to a branch mispredict. * Uses "Fast" branch masks */ object IsKilledByBranch { def apply(brupdate: BrUpdateInfo, uop: MicroOp): Bool = { return maskMatch(brupdate.b1.mispredict_mask, uop.br_mask) } def apply(brupdate: BrUpdateInfo, uop_mask: UInt): Bool = { return maskMatch(brupdate.b1.mispredict_mask, uop_mask) } } /** * Object to return new MicroOp with a new BR mask given a MicroOp mask * and old BR mask. */ object GetNewUopAndBrMask { def apply(uop: MicroOp, brupdate: BrUpdateInfo) (implicit p: Parameters): MicroOp = { val newuop = WireInit(uop) newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask newuop } } /** * Object to return a BR mask given a MicroOp mask and old BR mask. 
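 * Concretely, the returned mask is the old mask with the just-resolved branches cleared:
 * br_mask & ~brupdate.b1.resolve_mask.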
*/ object GetNewBrMask { def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt = { return uop.br_mask & ~brupdate.b1.resolve_mask } def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt = { return br_mask & ~brupdate.b1.resolve_mask } } object UpdateBrMask { def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = { val out = WireInit(uop) out.br_mask := GetNewBrMask(brupdate, uop) out } def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = { val out = WireInit(bundle) out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask) out } def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: Valid[T]): Valid[T] = { val out = WireInit(bundle) out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask) out.valid := bundle.valid && !IsKilledByBranch(brupdate, bundle.bits.uop.br_mask) out } } /** * Object to check if at least 1 bit matches in two masks */ object maskMatch { def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U } /** * Object to clear one bit in a mask given an index */ object clearMaskBit { def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0) } /** * Object to shift a register over by one bit and concat a new one */ object PerformShiftRegister { def apply(reg_val: UInt, new_bit: Bool): UInt = { reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt reg_val } } /** * Object to shift a register over by one bit, wrapping the top bit around to the bottom * (XOR'ed with a new-bit), and evicting a bit at index HLEN. * This is used to simulate a longer HLEN-width shift register that is folded * down to a compressed CLEN. */ object PerformCircularShiftRegister { def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = { val carry = csr(clen-1) val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U) newval } } /** * Object to increment an input value, wrapping it if * necessary. */ object WrapAdd { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, amt: UInt, n: Int): UInt = { if (isPow2(n)) { (value + amt)(log2Ceil(n)-1,0) } else { val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt) Mux(sum >= n.U, sum - n.U, sum) } } } /** * Object to decrement an input value, wrapping it if * necessary. */ object WrapSub { // "n" is the number of increments, so we wrap to n-1. def apply(value: UInt, amt: Int, n: Int): UInt = { if (isPow2(n)) { (value - amt.U)(log2Ceil(n)-1,0) } else { val v = Cat(0.U(1.W), value) val b = Cat(0.U(1.W), amt.U) Mux(value >= amt.U, value - amt.U, n.U - amt.U + value) } } } /** * Object to increment an input value, wrapping it if * necessary. */ object WrapInc { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, n: Int): UInt = { if (isPow2(n)) { (value + 1.U)(log2Ceil(n)-1,0) } else { val wrap = (value === (n-1).U) Mux(wrap, 0.U, value + 1.U) } } } /** * Object to decrement an input value, wrapping it if * necessary. */ object WrapDec { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, n: Int): UInt = { if (isPow2(n)) { (value - 1.U)(log2Ceil(n)-1,0) } else { val wrap = (value === 0.U) Mux(wrap, (n-1).U, value - 1.U) } } } /** * Object to mask off lower bits of a PC to align to a "b" * Byte boundary. */ object AlignPCToBoundary { def apply(pc: UInt, b: Int): UInt = { // Invert for scenario where pc longer than b // (which would clear all bits above size(b)). 
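    // Worked example (added for illustration): with b = 64, (b-1).U is 0x3f, so the expression
    // below clears pc[5:0] while preserving all higher bits of pc, i.e. it computes pc & ~0x3f
    // at the full width of pc (a bare ~((b-1).U) would only be as wide as the 6-bit literal).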
~(~pc | (b-1).U) } } /** * Object to rotate a signal left by one */ object RotateL1 { def apply(signal: UInt): UInt = { val w = signal.getWidth val out = Cat(signal(w-2,0), signal(w-1)) return out } } /** * Object to sext a value to a particular length. */ object Sext { def apply(x: UInt, length: Int): UInt = { if (x.getWidth == length) return x else return Cat(Fill(length-x.getWidth, x(x.getWidth-1)), x) } } /** * Object to translate from BOOM's special "packed immediate" to a 32b signed immediate * Asking for U-type gives it shifted up 12 bits. */ object ImmGen { import boom.v3.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U} def apply(ip: UInt, isel: UInt): SInt = { val sign = ip(LONGEST_IMM_SZ-1).asSInt val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign) val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign) val i11 = Mux(isel === IS_U, 0.S, Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign)) val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt) val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt) val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S) return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0).asSInt } } /** * Object to get the FP rounding mode out of a packed immediate. */ object ImmGenRm { def apply(ip: UInt): UInt = { return ip(2,0) } } /** * Object to get the FP function fype from a packed immediate. * Note: only works if !(IS_B or IS_S) */ object ImmGenTyp { def apply(ip: UInt): UInt = { return ip(9,8) } } /** * Object to see if an instruction is a JALR. */ object DebugIsJALR { def apply(inst: UInt): Bool = { // TODO Chisel not sure why this won't compile // val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)), // Array( // JALR -> Bool(true))) inst(6,0) === "b1100111".U } } /** * Object to take an instruction and output its branch or jal target. Only used * for a debug assert (no where else would we jump straight from instruction * bits to a target). */ object DebugGetBJImm { def apply(inst: UInt): UInt = { // TODO Chisel not sure why this won't compile //val csignals = //rocket.DecodeLogic(inst, // List(Bool(false), Bool(false)), // Array( // BEQ -> List(Bool(true ), Bool(false)), // BNE -> List(Bool(true ), Bool(false)), // BGE -> List(Bool(true ), Bool(false)), // BGEU -> List(Bool(true ), Bool(false)), // BLT -> List(Bool(true ), Bool(false)), // BLTU -> List(Bool(true ), Bool(false)) // )) //val is_br :: nothing :: Nil = csignals val is_br = (inst(6,0) === "b1100011".U) val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W)) val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W)) Mux(is_br, br_targ, jal_targ) } } /** * Object to return the lowest bit position after the head. */ object AgePriorityEncoder { def apply(in: Seq[Bool], head: UInt): UInt = { val n = in.size val width = log2Ceil(in.size) val n_padded = 1 << width val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in val idx = PriorityEncoder(temp_vec) idx(width-1, 0) //discard msb } } /** * Object to determine whether queue * index i0 is older than index i1. */ object IsOlder { def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head)) } /** * Set all bits at or below the highest order '1'. */ object MaskLower { def apply(in: UInt) = { val n = in.getWidth (0 until n).map(i => in >> i.U).reduce(_|_) } } /** * Set all bits at or above the lowest order '1'. 
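 * For example, a 4-bit input "b0100".U becomes "b1100".U.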
*/ object MaskUpper { def apply(in: UInt) = { val n = in.getWidth (0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_) } } /** * Transpose a matrix of Chisel Vecs. */ object Transpose { def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = { val n = in(0).size VecInit((0 until n).map(i => VecInit(in.map(row => row(i))))) } } /** * N-wide one-hot priority encoder. */ object SelectFirstN { def apply(in: UInt, n: Int) = { val sels = Wire(Vec(n, UInt(in.getWidth.W))) var mask = in for (i <- 0 until n) { sels(i) := PriorityEncoderOH(mask) mask = mask & ~sels(i) } sels } } /** * Connect the first k of n valid input interfaces to k output interfaces. */ class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module { require(n >= k) val io = IO(new Bundle { val in = Vec(n, Flipped(DecoupledIO(gen))) val out = Vec(k, DecoupledIO(gen)) }) if (n == k) { io.out <> io.in } else { val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c)) val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col => (col zip io.in.map(_.valid)) map {case (c,v) => c && v}) val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_)) val out_valids = sels map (col => col.reduce(_||_)) val out_data = sels map (s => Mux1H(s, io.in.map(_.bits))) in_readys zip io.in foreach {case (r,i) => i.ready := r} out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d} } } /** * Create a queue that can be killed with a branch kill signal. * Assumption: enq.valid only high if not killed by branch (so don't check IsKilled on io.enq). */ class BranchKillableQueue[T <: boom.v3.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v3.common.MicroOp => Bool = u => true.B, flow: Boolean = true) (implicit p: org.chipsalliance.cde.config.Parameters) extends boom.v3.common.BoomModule()(p) with boom.v3.common.HasBoomCoreParameters { val io = IO(new Bundle { val enq = Flipped(Decoupled(gen)) val deq = Decoupled(gen) val brupdate = Input(new BrUpdateInfo()) val flush = Input(Bool()) val empty = Output(Bool()) val count = Output(UInt(log2Ceil(entries).W)) }) val ram = Mem(entries, gen) val valids = RegInit(VecInit(Seq.fill(entries) {false.B})) val uops = Reg(Vec(entries, new MicroOp)) val enq_ptr = Counter(entries) val deq_ptr = Counter(entries) val maybe_full = RegInit(false.B) val ptr_match = enq_ptr.value === deq_ptr.value io.empty := ptr_match && !maybe_full val full = ptr_match && maybe_full val do_enq = WireInit(io.enq.fire) val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty) for (i <- 0 until entries) { val mask = uops(i).br_mask val uop = uops(i) valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, mask) && !(io.flush && flush_fn(uop)) when (valids(i)) { uops(i).br_mask := GetNewBrMask(io.brupdate, mask) } } when (do_enq) { ram(enq_ptr.value) := io.enq.bits valids(enq_ptr.value) := true.B //!IsKilledByBranch(io.brupdate, io.enq.bits.uop) uops(enq_ptr.value) := io.enq.bits.uop uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop) enq_ptr.inc() } when (do_deq) { valids(deq_ptr.value) := false.B deq_ptr.inc() } when (do_enq =/= do_deq) { maybe_full := do_enq } io.enq.ready := !full val out = Wire(gen) out := ram(deq_ptr.value) out.uop := uops(deq_ptr.value) io.deq.valid := !io.empty && valids(deq_ptr.value) && !IsKilledByBranch(io.brupdate, out.uop) && !(io.flush && flush_fn(out.uop)) io.deq.bits := out io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, 
out.uop) // For flow queue behavior. if (flow) { when (io.empty) { io.deq.valid := io.enq.valid //&& !IsKilledByBranch(io.brupdate, io.enq.bits.uop) io.deq.bits := io.enq.bits io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop) do_deq := false.B when (io.deq.ready) { do_enq := false.B } } } private val ptr_diff = enq_ptr.value - deq_ptr.value if (isPow2(entries)) { io.count := Cat(maybe_full && ptr_match, ptr_diff) } else { io.count := Mux(ptr_match, Mux(maybe_full, entries.asUInt, 0.U), Mux(deq_ptr.value > enq_ptr.value, entries.asUInt + ptr_diff, ptr_diff)) } } // ------------------------------------------ // Printf helper functions // ------------------------------------------ object BoolToChar { /** * Take in a Chisel Bool and convert it into a Str * based on the Chars given * * @param c_bool Chisel Bool * @param trueChar Scala Char if bool is true * @param falseChar Scala Char if bool is false * @return UInt ASCII Char for "trueChar" or "falseChar" */ def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = { Mux(c_bool, Str(trueChar), Str(falseChar)) } } object CfiTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param cfi_type specific cfi type * @return Vec of Strs (must be indexed to get specific char) */ def apply(cfi_type: UInt) = { val strings = Seq("----", "BR ", "JAL ", "JALR") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(cfi_type) } } object BpdTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param bpd_type specific bpd type * @return Vec of Strs (must be indexed to get specific char) */ def apply(bpd_type: UInt) = { val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(bpd_type) } } object RobTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param rob_type specific rob type * @return Vec of Strs (must be indexed to get specific char) */ def apply(rob_type: UInt) = { val strings = Seq("RST", "NML", "RBK", " WT") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(rob_type) } } object XRegToChars { /** * Get a Vec of Strs that can be used for printing * * @param xreg specific register number * @return Vec of Strs (must be indexed to get specific char) */ def apply(xreg: UInt) = { val strings = Seq(" x0", " ra", " sp", " gp", " tp", " t0", " t1", " t2", " s0", " s1", " a0", " a1", " a2", " a3", " a4", " a5", " a6", " a7", " s2", " s3", " s4", " s5", " s6", " s7", " s8", " s9", "s10", "s11", " t3", " t4", " t5", " t6") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(xreg) } } object FPRegToChars { /** * Get a Vec of Strs that can be used for printing * * @param fpreg specific register number * @return Vec of Strs (must be indexed to get specific char) */ def apply(fpreg: UInt) = { val strings = Seq(" ft0", " ft1", " ft2", " ft3", " ft4", " ft5", " ft6", " ft7", " fs0", " fs1", " fa0", " fa1", " fa2", " fa3", " fa4", " fa5", " fa6", " fa7", " fs2", " fs3", " fs4", " fs5", " fs6", " fs7", " fs8", " fs9", "fs10", "fs11", " ft8", " ft9", "ft10", "ft11") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(fpreg) } } object BoomCoreStringPrefix { /** * Add prefix to BOOM strings (currently only adds the hartId) 
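 * For example, on tile 0 this turns "foo" into "[C0] foo\n".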
* * @param strs list of strings * @return String combining the list with the prefix per line */ def apply(strs: String*)(implicit p: Parameters) = { val prefix = "[C" + s"${p(TileKey).tileId}" + "] " strs.map(str => prefix + str + "\n").mkString("") } } File rename-stage.scala: //****************************************************************************** // Copyright (c) 2012 - 2019, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // RISCV Processor Datapath: Rename Logic //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // // Supports 1-cycle and 2-cycle latencies. (aka, passthrough versus registers between ren1 and ren2). // - ren1: read the map tables and allocate a new physical register from the freelist. // - ren2: read the busy table for the physical operands. // // Ren1 data is provided as an output to be fed directly into the ROB. package boom.v3.exu import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.Parameters import boom.v3.common._ import boom.v3.util._ /** * IO bundle to interface with the Register Rename logic * * @param plWidth pipeline width * @param numIntPregs number of int physical registers * @param numFpPregs number of FP physical registers * @param numWbPorts number of int writeback ports * @param numWbPorts number of FP writeback ports */ class RenameStageIO( val plWidth: Int, val numPhysRegs: Int, val numWbPorts: Int) (implicit p: Parameters) extends BoomBundle /** * IO bundle to debug the rename stage */ class DebugRenameStageIO(val numPhysRegs: Int)(implicit p: Parameters) extends BoomBundle { val freelist = Bits(numPhysRegs.W) val isprlist = Bits(numPhysRegs.W) val busytable = UInt(numPhysRegs.W) } abstract class AbstractRenameStage( plWidth: Int, numPhysRegs: Int, numWbPorts: Int) (implicit p: Parameters) extends BoomModule { val io = IO(new Bundle { val ren_stalls = Output(Vec(plWidth, Bool())) val kill = Input(Bool()) val dec_fire = Input(Vec(plWidth, Bool())) // will commit state updates val dec_uops = Input(Vec(plWidth, new MicroOp())) // physical specifiers available AND busy/ready status available. 
val ren2_mask = Vec(plWidth, Output(Bool())) // mask of valid instructions val ren2_uops = Vec(plWidth, Output(new MicroOp())) // branch resolution (execute) val brupdate = Input(new BrUpdateInfo()) val dis_fire = Input(Vec(coreWidth, Bool())) val dis_ready = Input(Bool()) // wakeup ports val wakeups = Flipped(Vec(numWbPorts, Valid(new ExeUnitResp(xLen)))) // commit stage val com_valids = Input(Vec(plWidth, Bool())) val com_uops = Input(Vec(plWidth, new MicroOp())) val rbk_valids = Input(Vec(plWidth, Bool())) val rollback = Input(Bool()) val debug_rob_empty = Input(Bool()) val debug = Output(new DebugRenameStageIO(numPhysRegs)) }) io.ren_stalls.foreach(_ := false.B) io.debug := DontCare def BypassAllocations(uop: MicroOp, older_uops: Seq[MicroOp], alloc_reqs: Seq[Bool]): MicroOp //------------------------------------------------------------- // Pipeline State & Wires // Stage 1 val ren1_fire = Wire(Vec(plWidth, Bool())) val ren1_uops = Wire(Vec(plWidth, new MicroOp)) // Stage 2 val ren2_fire = io.dis_fire val ren2_ready = io.dis_ready val ren2_valids = Wire(Vec(plWidth, Bool())) val ren2_uops = Wire(Vec(plWidth, new MicroOp)) val ren2_alloc_reqs = Wire(Vec(plWidth, Bool())) //------------------------------------------------------------- // pipeline registers for (w <- 0 until plWidth) { ren1_fire(w) := io.dec_fire(w) ren1_uops(w) := io.dec_uops(w) } for (w <- 0 until plWidth) { val r_valid = RegInit(false.B) val r_uop = Reg(new MicroOp) val next_uop = Wire(new MicroOp) next_uop := r_uop when (io.kill) { r_valid := false.B } .elsewhen (ren2_ready) { r_valid := ren1_fire(w) next_uop := ren1_uops(w) } .otherwise { r_valid := r_valid && !ren2_fire(w) // clear bit if uop gets dispatched next_uop := r_uop } r_uop := GetNewUopAndBrMask(BypassAllocations(next_uop, ren2_uops, ren2_alloc_reqs), io.brupdate) ren2_valids(w) := r_valid ren2_uops(w) := r_uop } //------------------------------------------------------------- // Outputs io.ren2_mask := ren2_valids } /** * Rename stage that connets the map table, free list, and busy table. * Can be used in both the FP pipeline and the normal execute pipeline. 
* * @param plWidth pipeline width * @param numWbPorts number of int writeback ports * @param numWbPorts number of FP writeback ports */ class RenameStage( plWidth: Int, numPhysRegs: Int, numWbPorts: Int, float: Boolean) (implicit p: Parameters) extends AbstractRenameStage(plWidth, numPhysRegs, numWbPorts)(p) { val pregSz = log2Ceil(numPhysRegs) val rtype = if (float) RT_FLT else RT_FIX //------------------------------------------------------------- // Helper Functions def BypassAllocations(uop: MicroOp, older_uops: Seq[MicroOp], alloc_reqs: Seq[Bool]): MicroOp = { val bypassed_uop = Wire(new MicroOp) bypassed_uop := uop val bypass_hits_rs1 = (older_uops zip alloc_reqs) map { case (r,a) => a && r.ldst === uop.lrs1 } val bypass_hits_rs2 = (older_uops zip alloc_reqs) map { case (r,a) => a && r.ldst === uop.lrs2 } val bypass_hits_rs3 = (older_uops zip alloc_reqs) map { case (r,a) => a && r.ldst === uop.lrs3 } val bypass_hits_dst = (older_uops zip alloc_reqs) map { case (r,a) => a && r.ldst === uop.ldst } val bypass_sel_rs1 = PriorityEncoderOH(bypass_hits_rs1.reverse).reverse val bypass_sel_rs2 = PriorityEncoderOH(bypass_hits_rs2.reverse).reverse val bypass_sel_rs3 = PriorityEncoderOH(bypass_hits_rs3.reverse).reverse val bypass_sel_dst = PriorityEncoderOH(bypass_hits_dst.reverse).reverse val do_bypass_rs1 = bypass_hits_rs1.reduce(_||_) val do_bypass_rs2 = bypass_hits_rs2.reduce(_||_) val do_bypass_rs3 = bypass_hits_rs3.reduce(_||_) val do_bypass_dst = bypass_hits_dst.reduce(_||_) val bypass_pdsts = older_uops.map(_.pdst) when (do_bypass_rs1) { bypassed_uop.prs1 := Mux1H(bypass_sel_rs1, bypass_pdsts) } when (do_bypass_rs2) { bypassed_uop.prs2 := Mux1H(bypass_sel_rs2, bypass_pdsts) } when (do_bypass_rs3) { bypassed_uop.prs3 := Mux1H(bypass_sel_rs3, bypass_pdsts) } when (do_bypass_dst) { bypassed_uop.stale_pdst := Mux1H(bypass_sel_dst, bypass_pdsts) } bypassed_uop.prs1_busy := uop.prs1_busy || do_bypass_rs1 bypassed_uop.prs2_busy := uop.prs2_busy || do_bypass_rs2 bypassed_uop.prs3_busy := uop.prs3_busy || do_bypass_rs3 if (!float) { bypassed_uop.prs3 := DontCare bypassed_uop.prs3_busy := false.B } bypassed_uop } //------------------------------------------------------------- // Rename Structures val maptable = Module(new RenameMapTable( plWidth, 32, numPhysRegs, false, float)) val freelist = Module(new RenameFreeList( plWidth, numPhysRegs, if (float) 32 else 31)) val busytable = Module(new RenameBusyTable( plWidth, numPhysRegs, numWbPorts, false, float)) val ren2_br_tags = Wire(Vec(plWidth, Valid(UInt(brTagSz.W)))) // Commit/Rollback val com_valids = Wire(Vec(plWidth, Bool())) val rbk_valids = Wire(Vec(plWidth, Bool())) for (w <- 0 until plWidth) { ren2_alloc_reqs(w) := ren2_uops(w).ldst_val && ren2_uops(w).dst_rtype === rtype && ren2_fire(w) ren2_br_tags(w).valid := ren2_fire(w) && ren2_uops(w).allocate_brtag com_valids(w) := io.com_uops(w).ldst_val && io.com_uops(w).dst_rtype === rtype && io.com_valids(w) rbk_valids(w) := io.com_uops(w).ldst_val && io.com_uops(w).dst_rtype === rtype && io.rbk_valids(w) ren2_br_tags(w).bits := ren2_uops(w).br_tag } //------------------------------------------------------------- // Rename Table // Maptable inputs. val map_reqs = Wire(Vec(plWidth, new MapReq(lregSz))) val remap_reqs = Wire(Vec(plWidth, new RemapReq(lregSz, pregSz))) // Generate maptable requests. 
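  // Clarifying note: in normal operation each remap request installs the newly allocated pdst
  // for the renamed ldst; during rollback it instead restores the committing uop's stale_pdst
  // (note that io.com_uops is traversed in reverse order below).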
for ((((ren1,ren2),com),w) <- (ren1_uops zip ren2_uops zip io.com_uops.reverse).zipWithIndex) { map_reqs(w).lrs1 := ren1.lrs1 map_reqs(w).lrs2 := ren1.lrs2 map_reqs(w).lrs3 := ren1.lrs3 map_reqs(w).ldst := ren1.ldst remap_reqs(w).ldst := Mux(io.rollback, com.ldst , ren2.ldst) remap_reqs(w).pdst := Mux(io.rollback, com.stale_pdst, ren2.pdst) } ren2_alloc_reqs zip rbk_valids.reverse zip remap_reqs map { case ((a,r),rr) => rr.valid := a || r} // Hook up inputs. maptable.io.map_reqs := map_reqs maptable.io.remap_reqs := remap_reqs maptable.io.ren_br_tags := ren2_br_tags maptable.io.brupdate := io.brupdate maptable.io.rollback := io.rollback // Maptable outputs. for ((uop, w) <- ren1_uops.zipWithIndex) { val mappings = maptable.io.map_resps(w) uop.prs1 := mappings.prs1 uop.prs2 := mappings.prs2 uop.prs3 := mappings.prs3 // only FP has 3rd operand uop.stale_pdst := mappings.stale_pdst } //------------------------------------------------------------- // Free List // Freelist inputs. freelist.io.reqs := ren2_alloc_reqs freelist.io.dealloc_pregs zip com_valids zip rbk_valids map {case ((d,c),r) => d.valid := c || r} freelist.io.dealloc_pregs zip io.com_uops map {case (d,c) => d.bits := Mux(io.rollback, c.pdst, c.stale_pdst)} freelist.io.ren_br_tags := ren2_br_tags freelist.io.brupdate := io.brupdate freelist.io.debug.pipeline_empty := io.debug_rob_empty assert (ren2_alloc_reqs zip freelist.io.alloc_pregs map {case (r,p) => !r || p.bits =/= 0.U} reduce (_&&_), "[rename-stage] A uop is trying to allocate the zero physical register.") // Freelist outputs. for ((uop, w) <- ren2_uops.zipWithIndex) { val preg = freelist.io.alloc_pregs(w).bits uop.pdst := Mux(uop.ldst =/= 0.U || float.B, preg, 0.U) } //------------------------------------------------------------- // Busy Table busytable.io.ren_uops := ren2_uops // expects pdst to be set up. busytable.io.rebusy_reqs := ren2_alloc_reqs busytable.io.wb_valids := io.wakeups.map(_.valid) busytable.io.wb_pdsts := io.wakeups.map(_.bits.uop.pdst) assert (!(io.wakeups.map(x => x.valid && x.bits.uop.dst_rtype =/= rtype).reduce(_||_)), "[rename] Wakeup has wrong rtype.") for ((uop, w) <- ren2_uops.zipWithIndex) { val busy = busytable.io.busy_resps(w) uop.prs1_busy := uop.lrs1_rtype === rtype && busy.prs1_busy uop.prs2_busy := uop.lrs2_rtype === rtype && busy.prs2_busy uop.prs3_busy := uop.frs3_en && busy.prs3_busy val valid = ren2_valids(w) assert (!(valid && busy.prs1_busy && rtype === RT_FIX && uop.lrs1 === 0.U), "[rename] x0 is busy??") assert (!(valid && busy.prs2_busy && rtype === RT_FIX && uop.lrs2 === 0.U), "[rename] x0 is busy??") } //------------------------------------------------------------- // Outputs for (w <- 0 until plWidth) { val can_allocate = freelist.io.alloc_pregs(w).valid // Push back against Decode stage if Rename1 can't proceed. 
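    // Clarifying note: a lane stalls when its uop writes a destination of this pipeline's
    // register type (RT_FIX or RT_FLT) but the free list cannot supply a physical register
    // this cycle.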
io.ren_stalls(w) := (ren2_uops(w).dst_rtype === rtype) && !can_allocate val bypassed_uop = Wire(new MicroOp) if (w > 0) bypassed_uop := BypassAllocations(ren2_uops(w), ren2_uops.slice(0,w), ren2_alloc_reqs.slice(0,w)) else bypassed_uop := ren2_uops(w) io.ren2_uops(w) := GetNewUopAndBrMask(bypassed_uop, io.brupdate) } //------------------------------------------------------------- // Debug signals io.debug.freelist := freelist.io.debug.freelist io.debug.isprlist := freelist.io.debug.isprlist io.debug.busytable := busytable.io.debug.busytable } class PredRenameStage( plWidth: Int, numPhysRegs: Int, numWbPorts: Int) (implicit p: Parameters) extends AbstractRenameStage(plWidth, numPhysRegs, numWbPorts)(p) { def BypassAllocations(uop: MicroOp, older_uops: Seq[MicroOp], alloc_reqs: Seq[Bool]): MicroOp = { uop } ren2_alloc_reqs := DontCare val busy_table = RegInit(VecInit(0.U(ftqSz.W).asBools)) val to_busy = WireInit(VecInit(0.U(ftqSz.W).asBools)) val unbusy = WireInit(VecInit(0.U(ftqSz.W).asBools)) val current_ftq_idx = Reg(UInt(log2Ceil(ftqSz).W)) var next_ftq_idx = current_ftq_idx for (w <- 0 until plWidth) { io.ren2_uops(w) := ren2_uops(w) val is_sfb_br = ren2_uops(w).is_sfb_br && ren2_fire(w) val is_sfb_shadow = ren2_uops(w).is_sfb_shadow && ren2_fire(w) val ftq_idx = ren2_uops(w).ftq_idx when (is_sfb_br) { io.ren2_uops(w).pdst := ftq_idx to_busy(ftq_idx) := true.B } next_ftq_idx = Mux(is_sfb_br, ftq_idx, next_ftq_idx) when (is_sfb_shadow) { io.ren2_uops(w).ppred := next_ftq_idx io.ren2_uops(w).ppred_busy := (busy_table(next_ftq_idx) || to_busy(next_ftq_idx)) && !unbusy(next_ftq_idx) } } for (w <- 0 until numWbPorts) { when (io.wakeups(w).valid) { unbusy(io.wakeups(w).bits.uop.pdst) := true.B } } current_ftq_idx := next_ftq_idx busy_table := ((busy_table.asUInt | to_busy.asUInt) & ~unbusy.asUInt).asBools } File micro-op.scala: //****************************************************************************** // Copyright (c) 2015 - 2018, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // MicroOp //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ package boom.v3.common import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.Parameters import boom.v3.exu.FUConstants /** * Extension to BoomBundle to add a MicroOp */ abstract trait HasBoomUOP extends BoomBundle { val uop = new MicroOp() } /** * MicroOp passing through the pipeline */ class MicroOp(implicit p: Parameters) extends BoomBundle with freechips.rocketchip.rocket.constants.MemoryOpConstants with freechips.rocketchip.rocket.constants.ScalarOpConstants { val uopc = UInt(UOPC_SZ.W) // micro-op code val inst = UInt(32.W) val debug_inst = UInt(32.W) val is_rvc = Bool() val debug_pc = UInt(coreMaxAddrBits.W) val iq_type = UInt(IQT_SZ.W) // which issue unit do we use? val fu_code = UInt(FUConstants.FUC_SZ.W) // which functional unit do we use? val ctrl = new CtrlSignals // What is the next state of this uop in the issue window? useful // for the compacting queue. val iw_state = UInt(2.W) // Has operand 1 or 2 been waken speculatively by a load? 
// Only integer operands are speculaively woken up, // so we can ignore p3. val iw_p1_poisoned = Bool() val iw_p2_poisoned = Bool() val is_br = Bool() // is this micro-op a (branch) vs a regular PC+4 inst? val is_jalr = Bool() // is this a jump? (jal or jalr) val is_jal = Bool() // is this a JAL (doesn't include JR)? used for branch unit val is_sfb = Bool() // is this a sfb or in the shadow of a sfb val br_mask = UInt(maxBrCount.W) // which branches are we being speculated under? val br_tag = UInt(brTagSz.W) // Index into FTQ to figure out our fetch PC. val ftq_idx = UInt(log2Ceil(ftqSz).W) // This inst straddles two fetch packets val edge_inst = Bool() // Low-order bits of our own PC. Combine with ftq[ftq_idx] to get PC. // Aligned to a cache-line size, as that is the greater fetch granularity. // TODO: Shouldn't this be aligned to fetch-width size? val pc_lob = UInt(log2Ceil(icBlockBytes).W) // Was this a branch that was predicted taken? val taken = Bool() val imm_packed = UInt(LONGEST_IMM_SZ.W) // densely pack the imm in decode... // then translate and sign-extend in execute val csr_addr = UInt(CSR_ADDR_SZ.W) // only used for critical path reasons in Exe val rob_idx = UInt(robAddrSz.W) val ldq_idx = UInt(ldqAddrSz.W) val stq_idx = UInt(stqAddrSz.W) val rxq_idx = UInt(log2Ceil(numRxqEntries).W) val pdst = UInt(maxPregSz.W) val prs1 = UInt(maxPregSz.W) val prs2 = UInt(maxPregSz.W) val prs3 = UInt(maxPregSz.W) val ppred = UInt(log2Ceil(ftqSz).W) val prs1_busy = Bool() val prs2_busy = Bool() val prs3_busy = Bool() val ppred_busy = Bool() val stale_pdst = UInt(maxPregSz.W) val exception = Bool() val exc_cause = UInt(xLen.W) // TODO compress this down, xlen is insanity val bypassable = Bool() // can we bypass ALU results? (doesn't include loads, csr, etc...) val mem_cmd = UInt(M_SZ.W) // sync primitives/cache flushes val mem_size = UInt(2.W) val mem_signed = Bool() val is_fence = Bool() val is_fencei = Bool() val is_amo = Bool() val uses_ldq = Bool() val uses_stq = Bool() val is_sys_pc2epc = Bool() // Is a ECall or Breakpoint -- both set EPC to PC. val is_unique = Bool() // only allow this instruction in the pipeline, wait for STQ to // drain, clear fetcha fter it (tell ROB to un-ready until empty) val flush_on_commit = Bool() // some instructions need to flush the pipeline behind them // Preditation def is_sfb_br = is_br && is_sfb && enableSFBOpt.B // Does this write a predicate def is_sfb_shadow = !is_br && is_sfb && enableSFBOpt.B // Is this predicated val ldst_is_rs1 = Bool() // If this is set and we are predicated off, copy rs1 to dst, // else copy rs2 to dst // logical specifiers (only used in Decode->Rename), except rollback (ldst) val ldst = UInt(lregSz.W) val lrs1 = UInt(lregSz.W) val lrs2 = UInt(lregSz.W) val lrs3 = UInt(lregSz.W) val ldst_val = Bool() // is there a destination? invalid for stores, rd==x0, etc. val dst_rtype = UInt(2.W) val lrs1_rtype = UInt(2.W) val lrs2_rtype = UInt(2.W) val frs3_en = Bool() // floating point information val fp_val = Bool() // is a floating-point instruction (F- or D-extension)? // If it's non-ld/st it will write back exception bits to the fcsr. val fp_single = Bool() // single-precision floating point instruction (F-extension) // frontend exception information val xcpt_pf_if = Bool() // I-TLB page fault. val xcpt_ae_if = Bool() // I$ access exception. val xcpt_ma_if = Bool() // Misaligned fetch (jal/brjumping to misaligned addr). 
val bp_debug_if = Bool() // Breakpoint val bp_xcpt_if = Bool() // Breakpoint // What prediction structure provides the prediction FROM this op val debug_fsrc = UInt(BSRC_SZ.W) // What prediction structure provides the prediction TO this op val debug_tsrc = UInt(BSRC_SZ.W) // Do we allocate a branch tag for this? // SFB branches don't get a mask, they get a predicate bit def allocate_brtag = (is_br && !is_sfb) || is_jalr // Does this register write-back def rf_wen = dst_rtype =/= RT_X // Is it possible for this uop to misspeculate, preventing the commit of subsequent uops? def unsafe = uses_ldq || (uses_stq && !is_fence) || is_br || is_jalr def fu_code_is(_fu: UInt) = (fu_code & _fu) =/= 0.U } /** * Control signals within a MicroOp * * TODO REFACTOR this, as this should no longer be true, as bypass occurs in stage before branch resolution */ class CtrlSignals extends Bundle() { val br_type = UInt(BR_N.getWidth.W) val op1_sel = UInt(OP1_X.getWidth.W) val op2_sel = UInt(OP2_X.getWidth.W) val imm_sel = UInt(IS_X.getWidth.W) val op_fcn = UInt(freechips.rocketchip.rocket.ALU.SZ_ALU_FN.W) val fcn_dw = Bool() val csr_cmd = UInt(freechips.rocketchip.rocket.CSR.SZ.W) val is_load = Bool() // will invoke TLB address lookup val is_sta = Bool() // will invoke TLB address lookup val is_std = Bool() }
module PredRenameStage( // @[rename-stage.scala:356:7] input clock, // @[rename-stage.scala:356:7] input reset, // @[rename-stage.scala:356:7] input io_kill, // @[rename-stage.scala:60:14] input io_dec_fire_0, // @[rename-stage.scala:60:14] input io_dec_fire_1, // @[rename-stage.scala:60:14] input io_dec_fire_2, // @[rename-stage.scala:60:14] input [6:0] io_dec_uops_0_uopc, // @[rename-stage.scala:60:14] input [31:0] io_dec_uops_0_inst, // @[rename-stage.scala:60:14] input [31:0] io_dec_uops_0_debug_inst, // @[rename-stage.scala:60:14] input io_dec_uops_0_is_rvc, // @[rename-stage.scala:60:14] input [39:0] io_dec_uops_0_debug_pc, // @[rename-stage.scala:60:14] input [2:0] io_dec_uops_0_iq_type, // @[rename-stage.scala:60:14] input [9:0] io_dec_uops_0_fu_code, // @[rename-stage.scala:60:14] input io_dec_uops_0_is_br, // @[rename-stage.scala:60:14] input io_dec_uops_0_is_jalr, // @[rename-stage.scala:60:14] input io_dec_uops_0_is_jal, // @[rename-stage.scala:60:14] input io_dec_uops_0_is_sfb, // @[rename-stage.scala:60:14] input [15:0] io_dec_uops_0_br_mask, // @[rename-stage.scala:60:14] input [3:0] io_dec_uops_0_br_tag, // @[rename-stage.scala:60:14] input [4:0] io_dec_uops_0_ftq_idx, // @[rename-stage.scala:60:14] input io_dec_uops_0_edge_inst, // @[rename-stage.scala:60:14] input [5:0] io_dec_uops_0_pc_lob, // @[rename-stage.scala:60:14] input io_dec_uops_0_taken, // @[rename-stage.scala:60:14] input [19:0] io_dec_uops_0_imm_packed, // @[rename-stage.scala:60:14] input io_dec_uops_0_exception, // @[rename-stage.scala:60:14] input [63:0] io_dec_uops_0_exc_cause, // @[rename-stage.scala:60:14] input io_dec_uops_0_bypassable, // @[rename-stage.scala:60:14] input [4:0] io_dec_uops_0_mem_cmd, // @[rename-stage.scala:60:14] input [1:0] io_dec_uops_0_mem_size, // @[rename-stage.scala:60:14] input io_dec_uops_0_mem_signed, // @[rename-stage.scala:60:14] input io_dec_uops_0_is_fence, // @[rename-stage.scala:60:14] input io_dec_uops_0_is_fencei, // @[rename-stage.scala:60:14] input io_dec_uops_0_is_amo, // @[rename-stage.scala:60:14] input io_dec_uops_0_uses_ldq, // @[rename-stage.scala:60:14] input io_dec_uops_0_uses_stq, // @[rename-stage.scala:60:14] input io_dec_uops_0_is_sys_pc2epc, // @[rename-stage.scala:60:14] input io_dec_uops_0_is_unique, // @[rename-stage.scala:60:14] input io_dec_uops_0_flush_on_commit, // @[rename-stage.scala:60:14] input [5:0] io_dec_uops_0_ldst, // @[rename-stage.scala:60:14] input [5:0] io_dec_uops_0_lrs1, // @[rename-stage.scala:60:14] input [5:0] io_dec_uops_0_lrs2, // @[rename-stage.scala:60:14] input [5:0] io_dec_uops_0_lrs3, // @[rename-stage.scala:60:14] input io_dec_uops_0_ldst_val, // @[rename-stage.scala:60:14] input [1:0] io_dec_uops_0_dst_rtype, // @[rename-stage.scala:60:14] input [1:0] io_dec_uops_0_lrs1_rtype, // @[rename-stage.scala:60:14] input [1:0] io_dec_uops_0_lrs2_rtype, // @[rename-stage.scala:60:14] input io_dec_uops_0_frs3_en, // @[rename-stage.scala:60:14] input io_dec_uops_0_fp_val, // @[rename-stage.scala:60:14] input io_dec_uops_0_fp_single, // @[rename-stage.scala:60:14] input io_dec_uops_0_xcpt_pf_if, // @[rename-stage.scala:60:14] input io_dec_uops_0_xcpt_ae_if, // @[rename-stage.scala:60:14] input io_dec_uops_0_bp_debug_if, // @[rename-stage.scala:60:14] input io_dec_uops_0_bp_xcpt_if, // @[rename-stage.scala:60:14] input [1:0] io_dec_uops_0_debug_fsrc, // @[rename-stage.scala:60:14] input [6:0] io_dec_uops_1_uopc, // @[rename-stage.scala:60:14] input [31:0] io_dec_uops_1_inst, // @[rename-stage.scala:60:14] input [31:0] 
io_dec_uops_1_debug_inst, // @[rename-stage.scala:60:14] input io_dec_uops_1_is_rvc, // @[rename-stage.scala:60:14] input [39:0] io_dec_uops_1_debug_pc, // @[rename-stage.scala:60:14] input [2:0] io_dec_uops_1_iq_type, // @[rename-stage.scala:60:14] input [9:0] io_dec_uops_1_fu_code, // @[rename-stage.scala:60:14] input io_dec_uops_1_is_br, // @[rename-stage.scala:60:14] input io_dec_uops_1_is_jalr, // @[rename-stage.scala:60:14] input io_dec_uops_1_is_jal, // @[rename-stage.scala:60:14] input io_dec_uops_1_is_sfb, // @[rename-stage.scala:60:14] input [15:0] io_dec_uops_1_br_mask, // @[rename-stage.scala:60:14] input [3:0] io_dec_uops_1_br_tag, // @[rename-stage.scala:60:14] input [4:0] io_dec_uops_1_ftq_idx, // @[rename-stage.scala:60:14] input io_dec_uops_1_edge_inst, // @[rename-stage.scala:60:14] input [5:0] io_dec_uops_1_pc_lob, // @[rename-stage.scala:60:14] input io_dec_uops_1_taken, // @[rename-stage.scala:60:14] input [19:0] io_dec_uops_1_imm_packed, // @[rename-stage.scala:60:14] input io_dec_uops_1_exception, // @[rename-stage.scala:60:14] input [63:0] io_dec_uops_1_exc_cause, // @[rename-stage.scala:60:14] input io_dec_uops_1_bypassable, // @[rename-stage.scala:60:14] input [4:0] io_dec_uops_1_mem_cmd, // @[rename-stage.scala:60:14] input [1:0] io_dec_uops_1_mem_size, // @[rename-stage.scala:60:14] input io_dec_uops_1_mem_signed, // @[rename-stage.scala:60:14] input io_dec_uops_1_is_fence, // @[rename-stage.scala:60:14] input io_dec_uops_1_is_fencei, // @[rename-stage.scala:60:14] input io_dec_uops_1_is_amo, // @[rename-stage.scala:60:14] input io_dec_uops_1_uses_ldq, // @[rename-stage.scala:60:14] input io_dec_uops_1_uses_stq, // @[rename-stage.scala:60:14] input io_dec_uops_1_is_sys_pc2epc, // @[rename-stage.scala:60:14] input io_dec_uops_1_is_unique, // @[rename-stage.scala:60:14] input io_dec_uops_1_flush_on_commit, // @[rename-stage.scala:60:14] input [5:0] io_dec_uops_1_ldst, // @[rename-stage.scala:60:14] input [5:0] io_dec_uops_1_lrs1, // @[rename-stage.scala:60:14] input [5:0] io_dec_uops_1_lrs2, // @[rename-stage.scala:60:14] input [5:0] io_dec_uops_1_lrs3, // @[rename-stage.scala:60:14] input io_dec_uops_1_ldst_val, // @[rename-stage.scala:60:14] input [1:0] io_dec_uops_1_dst_rtype, // @[rename-stage.scala:60:14] input [1:0] io_dec_uops_1_lrs1_rtype, // @[rename-stage.scala:60:14] input [1:0] io_dec_uops_1_lrs2_rtype, // @[rename-stage.scala:60:14] input io_dec_uops_1_frs3_en, // @[rename-stage.scala:60:14] input io_dec_uops_1_fp_val, // @[rename-stage.scala:60:14] input io_dec_uops_1_fp_single, // @[rename-stage.scala:60:14] input io_dec_uops_1_xcpt_pf_if, // @[rename-stage.scala:60:14] input io_dec_uops_1_xcpt_ae_if, // @[rename-stage.scala:60:14] input io_dec_uops_1_bp_debug_if, // @[rename-stage.scala:60:14] input io_dec_uops_1_bp_xcpt_if, // @[rename-stage.scala:60:14] input [1:0] io_dec_uops_1_debug_fsrc, // @[rename-stage.scala:60:14] input [6:0] io_dec_uops_2_uopc, // @[rename-stage.scala:60:14] input [31:0] io_dec_uops_2_inst, // @[rename-stage.scala:60:14] input [31:0] io_dec_uops_2_debug_inst, // @[rename-stage.scala:60:14] input io_dec_uops_2_is_rvc, // @[rename-stage.scala:60:14] input [39:0] io_dec_uops_2_debug_pc, // @[rename-stage.scala:60:14] input [2:0] io_dec_uops_2_iq_type, // @[rename-stage.scala:60:14] input [9:0] io_dec_uops_2_fu_code, // @[rename-stage.scala:60:14] input io_dec_uops_2_is_br, // @[rename-stage.scala:60:14] input io_dec_uops_2_is_jalr, // @[rename-stage.scala:60:14] input io_dec_uops_2_is_jal, // @[rename-stage.scala:60:14] 
input io_dec_uops_2_is_sfb, // @[rename-stage.scala:60:14] input [15:0] io_dec_uops_2_br_mask, // @[rename-stage.scala:60:14] input [3:0] io_dec_uops_2_br_tag, // @[rename-stage.scala:60:14] input [4:0] io_dec_uops_2_ftq_idx, // @[rename-stage.scala:60:14] input io_dec_uops_2_edge_inst, // @[rename-stage.scala:60:14] input [5:0] io_dec_uops_2_pc_lob, // @[rename-stage.scala:60:14] input io_dec_uops_2_taken, // @[rename-stage.scala:60:14] input [19:0] io_dec_uops_2_imm_packed, // @[rename-stage.scala:60:14] input io_dec_uops_2_exception, // @[rename-stage.scala:60:14] input [63:0] io_dec_uops_2_exc_cause, // @[rename-stage.scala:60:14] input io_dec_uops_2_bypassable, // @[rename-stage.scala:60:14] input [4:0] io_dec_uops_2_mem_cmd, // @[rename-stage.scala:60:14] input [1:0] io_dec_uops_2_mem_size, // @[rename-stage.scala:60:14] input io_dec_uops_2_mem_signed, // @[rename-stage.scala:60:14] input io_dec_uops_2_is_fence, // @[rename-stage.scala:60:14] input io_dec_uops_2_is_fencei, // @[rename-stage.scala:60:14] input io_dec_uops_2_is_amo, // @[rename-stage.scala:60:14] input io_dec_uops_2_uses_ldq, // @[rename-stage.scala:60:14] input io_dec_uops_2_uses_stq, // @[rename-stage.scala:60:14] input io_dec_uops_2_is_sys_pc2epc, // @[rename-stage.scala:60:14] input io_dec_uops_2_is_unique, // @[rename-stage.scala:60:14] input io_dec_uops_2_flush_on_commit, // @[rename-stage.scala:60:14] input [5:0] io_dec_uops_2_ldst, // @[rename-stage.scala:60:14] input [5:0] io_dec_uops_2_lrs1, // @[rename-stage.scala:60:14] input [5:0] io_dec_uops_2_lrs2, // @[rename-stage.scala:60:14] input [5:0] io_dec_uops_2_lrs3, // @[rename-stage.scala:60:14] input io_dec_uops_2_ldst_val, // @[rename-stage.scala:60:14] input [1:0] io_dec_uops_2_dst_rtype, // @[rename-stage.scala:60:14] input [1:0] io_dec_uops_2_lrs1_rtype, // @[rename-stage.scala:60:14] input [1:0] io_dec_uops_2_lrs2_rtype, // @[rename-stage.scala:60:14] input io_dec_uops_2_frs3_en, // @[rename-stage.scala:60:14] input io_dec_uops_2_fp_val, // @[rename-stage.scala:60:14] input io_dec_uops_2_fp_single, // @[rename-stage.scala:60:14] input io_dec_uops_2_xcpt_pf_if, // @[rename-stage.scala:60:14] input io_dec_uops_2_xcpt_ae_if, // @[rename-stage.scala:60:14] input io_dec_uops_2_bp_debug_if, // @[rename-stage.scala:60:14] input io_dec_uops_2_bp_xcpt_if, // @[rename-stage.scala:60:14] input [1:0] io_dec_uops_2_debug_fsrc, // @[rename-stage.scala:60:14] input [15:0] io_brupdate_b1_resolve_mask, // @[rename-stage.scala:60:14] input [15:0] io_brupdate_b1_mispredict_mask, // @[rename-stage.scala:60:14] input [6:0] io_brupdate_b2_uop_uopc, // @[rename-stage.scala:60:14] input [31:0] io_brupdate_b2_uop_inst, // @[rename-stage.scala:60:14] input [31:0] io_brupdate_b2_uop_debug_inst, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_is_rvc, // @[rename-stage.scala:60:14] input [39:0] io_brupdate_b2_uop_debug_pc, // @[rename-stage.scala:60:14] input [2:0] io_brupdate_b2_uop_iq_type, // @[rename-stage.scala:60:14] input [9:0] io_brupdate_b2_uop_fu_code, // @[rename-stage.scala:60:14] input [3:0] io_brupdate_b2_uop_ctrl_br_type, // @[rename-stage.scala:60:14] input [1:0] io_brupdate_b2_uop_ctrl_op1_sel, // @[rename-stage.scala:60:14] input [2:0] io_brupdate_b2_uop_ctrl_op2_sel, // @[rename-stage.scala:60:14] input [2:0] io_brupdate_b2_uop_ctrl_imm_sel, // @[rename-stage.scala:60:14] input [4:0] io_brupdate_b2_uop_ctrl_op_fcn, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_ctrl_fcn_dw, // @[rename-stage.scala:60:14] input [2:0] 
io_brupdate_b2_uop_ctrl_csr_cmd, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_ctrl_is_load, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_ctrl_is_sta, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_ctrl_is_std, // @[rename-stage.scala:60:14] input [1:0] io_brupdate_b2_uop_iw_state, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_iw_p1_poisoned, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_iw_p2_poisoned, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_is_br, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_is_jalr, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_is_jal, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_is_sfb, // @[rename-stage.scala:60:14] input [15:0] io_brupdate_b2_uop_br_mask, // @[rename-stage.scala:60:14] input [3:0] io_brupdate_b2_uop_br_tag, // @[rename-stage.scala:60:14] input [4:0] io_brupdate_b2_uop_ftq_idx, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_edge_inst, // @[rename-stage.scala:60:14] input [5:0] io_brupdate_b2_uop_pc_lob, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_taken, // @[rename-stage.scala:60:14] input [19:0] io_brupdate_b2_uop_imm_packed, // @[rename-stage.scala:60:14] input [11:0] io_brupdate_b2_uop_csr_addr, // @[rename-stage.scala:60:14] input [6:0] io_brupdate_b2_uop_rob_idx, // @[rename-stage.scala:60:14] input [4:0] io_brupdate_b2_uop_ldq_idx, // @[rename-stage.scala:60:14] input [4:0] io_brupdate_b2_uop_stq_idx, // @[rename-stage.scala:60:14] input [1:0] io_brupdate_b2_uop_rxq_idx, // @[rename-stage.scala:60:14] input [6:0] io_brupdate_b2_uop_pdst, // @[rename-stage.scala:60:14] input [6:0] io_brupdate_b2_uop_prs1, // @[rename-stage.scala:60:14] input [6:0] io_brupdate_b2_uop_prs2, // @[rename-stage.scala:60:14] input [6:0] io_brupdate_b2_uop_prs3, // @[rename-stage.scala:60:14] input [4:0] io_brupdate_b2_uop_ppred, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_prs1_busy, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_prs2_busy, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_prs3_busy, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_ppred_busy, // @[rename-stage.scala:60:14] input [6:0] io_brupdate_b2_uop_stale_pdst, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_exception, // @[rename-stage.scala:60:14] input [63:0] io_brupdate_b2_uop_exc_cause, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_bypassable, // @[rename-stage.scala:60:14] input [4:0] io_brupdate_b2_uop_mem_cmd, // @[rename-stage.scala:60:14] input [1:0] io_brupdate_b2_uop_mem_size, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_mem_signed, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_is_fence, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_is_fencei, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_is_amo, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_uses_ldq, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_uses_stq, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_is_sys_pc2epc, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_is_unique, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_flush_on_commit, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_ldst_is_rs1, // @[rename-stage.scala:60:14] input [5:0] io_brupdate_b2_uop_ldst, // @[rename-stage.scala:60:14] input [5:0] io_brupdate_b2_uop_lrs1, // @[rename-stage.scala:60:14] input [5:0] io_brupdate_b2_uop_lrs2, // @[rename-stage.scala:60:14] input [5:0] io_brupdate_b2_uop_lrs3, // 
@[rename-stage.scala:60:14] input io_brupdate_b2_uop_ldst_val, // @[rename-stage.scala:60:14] input [1:0] io_brupdate_b2_uop_dst_rtype, // @[rename-stage.scala:60:14] input [1:0] io_brupdate_b2_uop_lrs1_rtype, // @[rename-stage.scala:60:14] input [1:0] io_brupdate_b2_uop_lrs2_rtype, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_frs3_en, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_fp_val, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_fp_single, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_xcpt_pf_if, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_xcpt_ae_if, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_xcpt_ma_if, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_bp_debug_if, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_bp_xcpt_if, // @[rename-stage.scala:60:14] input [1:0] io_brupdate_b2_uop_debug_fsrc, // @[rename-stage.scala:60:14] input [1:0] io_brupdate_b2_uop_debug_tsrc, // @[rename-stage.scala:60:14] input io_brupdate_b2_valid, // @[rename-stage.scala:60:14] input io_brupdate_b2_mispredict, // @[rename-stage.scala:60:14] input io_brupdate_b2_taken, // @[rename-stage.scala:60:14] input [2:0] io_brupdate_b2_cfi_type, // @[rename-stage.scala:60:14] input [1:0] io_brupdate_b2_pc_sel, // @[rename-stage.scala:60:14] input [39:0] io_brupdate_b2_jalr_target, // @[rename-stage.scala:60:14] input [20:0] io_brupdate_b2_target_offset, // @[rename-stage.scala:60:14] input io_dis_fire_0, // @[rename-stage.scala:60:14] input io_dis_fire_1, // @[rename-stage.scala:60:14] input io_dis_fire_2, // @[rename-stage.scala:60:14] input io_dis_ready, // @[rename-stage.scala:60:14] input io_com_valids_0, // @[rename-stage.scala:60:14] input io_com_valids_1, // @[rename-stage.scala:60:14] input io_com_valids_2, // @[rename-stage.scala:60:14] input [6:0] io_com_uops_0_uopc, // @[rename-stage.scala:60:14] input [31:0] io_com_uops_0_inst, // @[rename-stage.scala:60:14] input [31:0] io_com_uops_0_debug_inst, // @[rename-stage.scala:60:14] input io_com_uops_0_is_rvc, // @[rename-stage.scala:60:14] input [39:0] io_com_uops_0_debug_pc, // @[rename-stage.scala:60:14] input [2:0] io_com_uops_0_iq_type, // @[rename-stage.scala:60:14] input [9:0] io_com_uops_0_fu_code, // @[rename-stage.scala:60:14] input [3:0] io_com_uops_0_ctrl_br_type, // @[rename-stage.scala:60:14] input [1:0] io_com_uops_0_ctrl_op1_sel, // @[rename-stage.scala:60:14] input [2:0] io_com_uops_0_ctrl_op2_sel, // @[rename-stage.scala:60:14] input [2:0] io_com_uops_0_ctrl_imm_sel, // @[rename-stage.scala:60:14] input [4:0] io_com_uops_0_ctrl_op_fcn, // @[rename-stage.scala:60:14] input io_com_uops_0_ctrl_fcn_dw, // @[rename-stage.scala:60:14] input [2:0] io_com_uops_0_ctrl_csr_cmd, // @[rename-stage.scala:60:14] input io_com_uops_0_ctrl_is_load, // @[rename-stage.scala:60:14] input io_com_uops_0_ctrl_is_sta, // @[rename-stage.scala:60:14] input io_com_uops_0_ctrl_is_std, // @[rename-stage.scala:60:14] input [1:0] io_com_uops_0_iw_state, // @[rename-stage.scala:60:14] input io_com_uops_0_iw_p1_poisoned, // @[rename-stage.scala:60:14] input io_com_uops_0_iw_p2_poisoned, // @[rename-stage.scala:60:14] input io_com_uops_0_is_br, // @[rename-stage.scala:60:14] input io_com_uops_0_is_jalr, // @[rename-stage.scala:60:14] input io_com_uops_0_is_jal, // @[rename-stage.scala:60:14] input io_com_uops_0_is_sfb, // @[rename-stage.scala:60:14] input [15:0] io_com_uops_0_br_mask, // @[rename-stage.scala:60:14] input [3:0] io_com_uops_0_br_tag, // @[rename-stage.scala:60:14] input [4:0] 
io_com_uops_0_ftq_idx, // @[rename-stage.scala:60:14] input io_com_uops_0_edge_inst, // @[rename-stage.scala:60:14] input [5:0] io_com_uops_0_pc_lob, // @[rename-stage.scala:60:14] input io_com_uops_0_taken, // @[rename-stage.scala:60:14] input [19:0] io_com_uops_0_imm_packed, // @[rename-stage.scala:60:14] input [11:0] io_com_uops_0_csr_addr, // @[rename-stage.scala:60:14] input [6:0] io_com_uops_0_rob_idx, // @[rename-stage.scala:60:14] input [4:0] io_com_uops_0_ldq_idx, // @[rename-stage.scala:60:14] input [4:0] io_com_uops_0_stq_idx, // @[rename-stage.scala:60:14] input [1:0] io_com_uops_0_rxq_idx, // @[rename-stage.scala:60:14] input [6:0] io_com_uops_0_pdst, // @[rename-stage.scala:60:14] input [6:0] io_com_uops_0_prs1, // @[rename-stage.scala:60:14] input [6:0] io_com_uops_0_prs2, // @[rename-stage.scala:60:14] input [6:0] io_com_uops_0_prs3, // @[rename-stage.scala:60:14] input [4:0] io_com_uops_0_ppred, // @[rename-stage.scala:60:14] input io_com_uops_0_prs1_busy, // @[rename-stage.scala:60:14] input io_com_uops_0_prs2_busy, // @[rename-stage.scala:60:14] input io_com_uops_0_prs3_busy, // @[rename-stage.scala:60:14] input io_com_uops_0_ppred_busy, // @[rename-stage.scala:60:14] input [6:0] io_com_uops_0_stale_pdst, // @[rename-stage.scala:60:14] input io_com_uops_0_exception, // @[rename-stage.scala:60:14] input [63:0] io_com_uops_0_exc_cause, // @[rename-stage.scala:60:14] input io_com_uops_0_bypassable, // @[rename-stage.scala:60:14] input [4:0] io_com_uops_0_mem_cmd, // @[rename-stage.scala:60:14] input [1:0] io_com_uops_0_mem_size, // @[rename-stage.scala:60:14] input io_com_uops_0_mem_signed, // @[rename-stage.scala:60:14] input io_com_uops_0_is_fence, // @[rename-stage.scala:60:14] input io_com_uops_0_is_fencei, // @[rename-stage.scala:60:14] input io_com_uops_0_is_amo, // @[rename-stage.scala:60:14] input io_com_uops_0_uses_ldq, // @[rename-stage.scala:60:14] input io_com_uops_0_uses_stq, // @[rename-stage.scala:60:14] input io_com_uops_0_is_sys_pc2epc, // @[rename-stage.scala:60:14] input io_com_uops_0_is_unique, // @[rename-stage.scala:60:14] input io_com_uops_0_flush_on_commit, // @[rename-stage.scala:60:14] input io_com_uops_0_ldst_is_rs1, // @[rename-stage.scala:60:14] input [5:0] io_com_uops_0_ldst, // @[rename-stage.scala:60:14] input [5:0] io_com_uops_0_lrs1, // @[rename-stage.scala:60:14] input [5:0] io_com_uops_0_lrs2, // @[rename-stage.scala:60:14] input [5:0] io_com_uops_0_lrs3, // @[rename-stage.scala:60:14] input io_com_uops_0_ldst_val, // @[rename-stage.scala:60:14] input [1:0] io_com_uops_0_dst_rtype, // @[rename-stage.scala:60:14] input [1:0] io_com_uops_0_lrs1_rtype, // @[rename-stage.scala:60:14] input [1:0] io_com_uops_0_lrs2_rtype, // @[rename-stage.scala:60:14] input io_com_uops_0_frs3_en, // @[rename-stage.scala:60:14] input io_com_uops_0_fp_val, // @[rename-stage.scala:60:14] input io_com_uops_0_fp_single, // @[rename-stage.scala:60:14] input io_com_uops_0_xcpt_pf_if, // @[rename-stage.scala:60:14] input io_com_uops_0_xcpt_ae_if, // @[rename-stage.scala:60:14] input io_com_uops_0_xcpt_ma_if, // @[rename-stage.scala:60:14] input io_com_uops_0_bp_debug_if, // @[rename-stage.scala:60:14] input io_com_uops_0_bp_xcpt_if, // @[rename-stage.scala:60:14] input [1:0] io_com_uops_0_debug_fsrc, // @[rename-stage.scala:60:14] input [1:0] io_com_uops_0_debug_tsrc, // @[rename-stage.scala:60:14] input [6:0] io_com_uops_1_uopc, // @[rename-stage.scala:60:14] input [31:0] io_com_uops_1_inst, // @[rename-stage.scala:60:14] input [31:0] io_com_uops_1_debug_inst, // 
@[rename-stage.scala:60:14] input io_com_uops_1_is_rvc, // @[rename-stage.scala:60:14] input [39:0] io_com_uops_1_debug_pc, // @[rename-stage.scala:60:14] input [2:0] io_com_uops_1_iq_type, // @[rename-stage.scala:60:14] input [9:0] io_com_uops_1_fu_code, // @[rename-stage.scala:60:14] input [3:0] io_com_uops_1_ctrl_br_type, // @[rename-stage.scala:60:14] input [1:0] io_com_uops_1_ctrl_op1_sel, // @[rename-stage.scala:60:14] input [2:0] io_com_uops_1_ctrl_op2_sel, // @[rename-stage.scala:60:14] input [2:0] io_com_uops_1_ctrl_imm_sel, // @[rename-stage.scala:60:14] input [4:0] io_com_uops_1_ctrl_op_fcn, // @[rename-stage.scala:60:14] input io_com_uops_1_ctrl_fcn_dw, // @[rename-stage.scala:60:14] input [2:0] io_com_uops_1_ctrl_csr_cmd, // @[rename-stage.scala:60:14] input io_com_uops_1_ctrl_is_load, // @[rename-stage.scala:60:14] input io_com_uops_1_ctrl_is_sta, // @[rename-stage.scala:60:14] input io_com_uops_1_ctrl_is_std, // @[rename-stage.scala:60:14] input [1:0] io_com_uops_1_iw_state, // @[rename-stage.scala:60:14] input io_com_uops_1_iw_p1_poisoned, // @[rename-stage.scala:60:14] input io_com_uops_1_iw_p2_poisoned, // @[rename-stage.scala:60:14] input io_com_uops_1_is_br, // @[rename-stage.scala:60:14] input io_com_uops_1_is_jalr, // @[rename-stage.scala:60:14] input io_com_uops_1_is_jal, // @[rename-stage.scala:60:14] input io_com_uops_1_is_sfb, // @[rename-stage.scala:60:14] input [15:0] io_com_uops_1_br_mask, // @[rename-stage.scala:60:14] input [3:0] io_com_uops_1_br_tag, // @[rename-stage.scala:60:14] input [4:0] io_com_uops_1_ftq_idx, // @[rename-stage.scala:60:14] input io_com_uops_1_edge_inst, // @[rename-stage.scala:60:14] input [5:0] io_com_uops_1_pc_lob, // @[rename-stage.scala:60:14] input io_com_uops_1_taken, // @[rename-stage.scala:60:14] input [19:0] io_com_uops_1_imm_packed, // @[rename-stage.scala:60:14] input [11:0] io_com_uops_1_csr_addr, // @[rename-stage.scala:60:14] input [6:0] io_com_uops_1_rob_idx, // @[rename-stage.scala:60:14] input [4:0] io_com_uops_1_ldq_idx, // @[rename-stage.scala:60:14] input [4:0] io_com_uops_1_stq_idx, // @[rename-stage.scala:60:14] input [1:0] io_com_uops_1_rxq_idx, // @[rename-stage.scala:60:14] input [6:0] io_com_uops_1_pdst, // @[rename-stage.scala:60:14] input [6:0] io_com_uops_1_prs1, // @[rename-stage.scala:60:14] input [6:0] io_com_uops_1_prs2, // @[rename-stage.scala:60:14] input [6:0] io_com_uops_1_prs3, // @[rename-stage.scala:60:14] input [4:0] io_com_uops_1_ppred, // @[rename-stage.scala:60:14] input io_com_uops_1_prs1_busy, // @[rename-stage.scala:60:14] input io_com_uops_1_prs2_busy, // @[rename-stage.scala:60:14] input io_com_uops_1_prs3_busy, // @[rename-stage.scala:60:14] input io_com_uops_1_ppred_busy, // @[rename-stage.scala:60:14] input [6:0] io_com_uops_1_stale_pdst, // @[rename-stage.scala:60:14] input io_com_uops_1_exception, // @[rename-stage.scala:60:14] input [63:0] io_com_uops_1_exc_cause, // @[rename-stage.scala:60:14] input io_com_uops_1_bypassable, // @[rename-stage.scala:60:14] input [4:0] io_com_uops_1_mem_cmd, // @[rename-stage.scala:60:14] input [1:0] io_com_uops_1_mem_size, // @[rename-stage.scala:60:14] input io_com_uops_1_mem_signed, // @[rename-stage.scala:60:14] input io_com_uops_1_is_fence, // @[rename-stage.scala:60:14] input io_com_uops_1_is_fencei, // @[rename-stage.scala:60:14] input io_com_uops_1_is_amo, // @[rename-stage.scala:60:14] input io_com_uops_1_uses_ldq, // @[rename-stage.scala:60:14] input io_com_uops_1_uses_stq, // @[rename-stage.scala:60:14] input io_com_uops_1_is_sys_pc2epc, 
// @[rename-stage.scala:60:14] input io_com_uops_1_is_unique, // @[rename-stage.scala:60:14] input io_com_uops_1_flush_on_commit, // @[rename-stage.scala:60:14] input io_com_uops_1_ldst_is_rs1, // @[rename-stage.scala:60:14] input [5:0] io_com_uops_1_ldst, // @[rename-stage.scala:60:14] input [5:0] io_com_uops_1_lrs1, // @[rename-stage.scala:60:14] input [5:0] io_com_uops_1_lrs2, // @[rename-stage.scala:60:14] input [5:0] io_com_uops_1_lrs3, // @[rename-stage.scala:60:14] input io_com_uops_1_ldst_val, // @[rename-stage.scala:60:14] input [1:0] io_com_uops_1_dst_rtype, // @[rename-stage.scala:60:14] input [1:0] io_com_uops_1_lrs1_rtype, // @[rename-stage.scala:60:14] input [1:0] io_com_uops_1_lrs2_rtype, // @[rename-stage.scala:60:14] input io_com_uops_1_frs3_en, // @[rename-stage.scala:60:14] input io_com_uops_1_fp_val, // @[rename-stage.scala:60:14] input io_com_uops_1_fp_single, // @[rename-stage.scala:60:14] input io_com_uops_1_xcpt_pf_if, // @[rename-stage.scala:60:14] input io_com_uops_1_xcpt_ae_if, // @[rename-stage.scala:60:14] input io_com_uops_1_xcpt_ma_if, // @[rename-stage.scala:60:14] input io_com_uops_1_bp_debug_if, // @[rename-stage.scala:60:14] input io_com_uops_1_bp_xcpt_if, // @[rename-stage.scala:60:14] input [1:0] io_com_uops_1_debug_fsrc, // @[rename-stage.scala:60:14] input [1:0] io_com_uops_1_debug_tsrc, // @[rename-stage.scala:60:14] input [6:0] io_com_uops_2_uopc, // @[rename-stage.scala:60:14] input [31:0] io_com_uops_2_inst, // @[rename-stage.scala:60:14] input [31:0] io_com_uops_2_debug_inst, // @[rename-stage.scala:60:14] input io_com_uops_2_is_rvc, // @[rename-stage.scala:60:14] input [39:0] io_com_uops_2_debug_pc, // @[rename-stage.scala:60:14] input [2:0] io_com_uops_2_iq_type, // @[rename-stage.scala:60:14] input [9:0] io_com_uops_2_fu_code, // @[rename-stage.scala:60:14] input [3:0] io_com_uops_2_ctrl_br_type, // @[rename-stage.scala:60:14] input [1:0] io_com_uops_2_ctrl_op1_sel, // @[rename-stage.scala:60:14] input [2:0] io_com_uops_2_ctrl_op2_sel, // @[rename-stage.scala:60:14] input [2:0] io_com_uops_2_ctrl_imm_sel, // @[rename-stage.scala:60:14] input [4:0] io_com_uops_2_ctrl_op_fcn, // @[rename-stage.scala:60:14] input io_com_uops_2_ctrl_fcn_dw, // @[rename-stage.scala:60:14] input [2:0] io_com_uops_2_ctrl_csr_cmd, // @[rename-stage.scala:60:14] input io_com_uops_2_ctrl_is_load, // @[rename-stage.scala:60:14] input io_com_uops_2_ctrl_is_sta, // @[rename-stage.scala:60:14] input io_com_uops_2_ctrl_is_std, // @[rename-stage.scala:60:14] input [1:0] io_com_uops_2_iw_state, // @[rename-stage.scala:60:14] input io_com_uops_2_iw_p1_poisoned, // @[rename-stage.scala:60:14] input io_com_uops_2_iw_p2_poisoned, // @[rename-stage.scala:60:14] input io_com_uops_2_is_br, // @[rename-stage.scala:60:14] input io_com_uops_2_is_jalr, // @[rename-stage.scala:60:14] input io_com_uops_2_is_jal, // @[rename-stage.scala:60:14] input io_com_uops_2_is_sfb, // @[rename-stage.scala:60:14] input [15:0] io_com_uops_2_br_mask, // @[rename-stage.scala:60:14] input [3:0] io_com_uops_2_br_tag, // @[rename-stage.scala:60:14] input [4:0] io_com_uops_2_ftq_idx, // @[rename-stage.scala:60:14] input io_com_uops_2_edge_inst, // @[rename-stage.scala:60:14] input [5:0] io_com_uops_2_pc_lob, // @[rename-stage.scala:60:14] input io_com_uops_2_taken, // @[rename-stage.scala:60:14] input [19:0] io_com_uops_2_imm_packed, // @[rename-stage.scala:60:14] input [11:0] io_com_uops_2_csr_addr, // @[rename-stage.scala:60:14] input [6:0] io_com_uops_2_rob_idx, // @[rename-stage.scala:60:14] input [4:0] 
io_com_uops_2_ldq_idx, // @[rename-stage.scala:60:14] input [4:0] io_com_uops_2_stq_idx, // @[rename-stage.scala:60:14] input [1:0] io_com_uops_2_rxq_idx, // @[rename-stage.scala:60:14] input [6:0] io_com_uops_2_pdst, // @[rename-stage.scala:60:14] input [6:0] io_com_uops_2_prs1, // @[rename-stage.scala:60:14] input [6:0] io_com_uops_2_prs2, // @[rename-stage.scala:60:14] input [6:0] io_com_uops_2_prs3, // @[rename-stage.scala:60:14] input [4:0] io_com_uops_2_ppred, // @[rename-stage.scala:60:14] input io_com_uops_2_prs1_busy, // @[rename-stage.scala:60:14] input io_com_uops_2_prs2_busy, // @[rename-stage.scala:60:14] input io_com_uops_2_prs3_busy, // @[rename-stage.scala:60:14] input io_com_uops_2_ppred_busy, // @[rename-stage.scala:60:14] input [6:0] io_com_uops_2_stale_pdst, // @[rename-stage.scala:60:14] input io_com_uops_2_exception, // @[rename-stage.scala:60:14] input [63:0] io_com_uops_2_exc_cause, // @[rename-stage.scala:60:14] input io_com_uops_2_bypassable, // @[rename-stage.scala:60:14] input [4:0] io_com_uops_2_mem_cmd, // @[rename-stage.scala:60:14] input [1:0] io_com_uops_2_mem_size, // @[rename-stage.scala:60:14] input io_com_uops_2_mem_signed, // @[rename-stage.scala:60:14] input io_com_uops_2_is_fence, // @[rename-stage.scala:60:14] input io_com_uops_2_is_fencei, // @[rename-stage.scala:60:14] input io_com_uops_2_is_amo, // @[rename-stage.scala:60:14] input io_com_uops_2_uses_ldq, // @[rename-stage.scala:60:14] input io_com_uops_2_uses_stq, // @[rename-stage.scala:60:14] input io_com_uops_2_is_sys_pc2epc, // @[rename-stage.scala:60:14] input io_com_uops_2_is_unique, // @[rename-stage.scala:60:14] input io_com_uops_2_flush_on_commit, // @[rename-stage.scala:60:14] input io_com_uops_2_ldst_is_rs1, // @[rename-stage.scala:60:14] input [5:0] io_com_uops_2_ldst, // @[rename-stage.scala:60:14] input [5:0] io_com_uops_2_lrs1, // @[rename-stage.scala:60:14] input [5:0] io_com_uops_2_lrs2, // @[rename-stage.scala:60:14] input [5:0] io_com_uops_2_lrs3, // @[rename-stage.scala:60:14] input io_com_uops_2_ldst_val, // @[rename-stage.scala:60:14] input [1:0] io_com_uops_2_dst_rtype, // @[rename-stage.scala:60:14] input [1:0] io_com_uops_2_lrs1_rtype, // @[rename-stage.scala:60:14] input [1:0] io_com_uops_2_lrs2_rtype, // @[rename-stage.scala:60:14] input io_com_uops_2_frs3_en, // @[rename-stage.scala:60:14] input io_com_uops_2_fp_val, // @[rename-stage.scala:60:14] input io_com_uops_2_fp_single, // @[rename-stage.scala:60:14] input io_com_uops_2_xcpt_pf_if, // @[rename-stage.scala:60:14] input io_com_uops_2_xcpt_ae_if, // @[rename-stage.scala:60:14] input io_com_uops_2_xcpt_ma_if, // @[rename-stage.scala:60:14] input io_com_uops_2_bp_debug_if, // @[rename-stage.scala:60:14] input io_com_uops_2_bp_xcpt_if, // @[rename-stage.scala:60:14] input [1:0] io_com_uops_2_debug_fsrc, // @[rename-stage.scala:60:14] input [1:0] io_com_uops_2_debug_tsrc, // @[rename-stage.scala:60:14] input io_rbk_valids_0, // @[rename-stage.scala:60:14] input io_rbk_valids_1, // @[rename-stage.scala:60:14] input io_rbk_valids_2, // @[rename-stage.scala:60:14] input io_rollback, // @[rename-stage.scala:60:14] input io_debug_rob_empty // @[rename-stage.scala:60:14] ); wire [1:0] next_uop_2_debug_tsrc; // @[rename-stage.scala:123:24] wire [1:0] next_uop_2_debug_fsrc; // @[rename-stage.scala:123:24] wire next_uop_2_bp_xcpt_if; // @[rename-stage.scala:123:24] wire next_uop_2_bp_debug_if; // @[rename-stage.scala:123:24] wire next_uop_2_xcpt_ma_if; // @[rename-stage.scala:123:24] wire next_uop_2_xcpt_ae_if; // 
@[rename-stage.scala:123:24] wire next_uop_2_xcpt_pf_if; // @[rename-stage.scala:123:24] wire next_uop_2_fp_single; // @[rename-stage.scala:123:24] wire next_uop_2_fp_val; // @[rename-stage.scala:123:24] wire next_uop_2_frs3_en; // @[rename-stage.scala:123:24] wire [1:0] next_uop_2_lrs2_rtype; // @[rename-stage.scala:123:24] wire [1:0] next_uop_2_lrs1_rtype; // @[rename-stage.scala:123:24] wire [1:0] next_uop_2_dst_rtype; // @[rename-stage.scala:123:24] wire next_uop_2_ldst_val; // @[rename-stage.scala:123:24] wire [5:0] next_uop_2_lrs3; // @[rename-stage.scala:123:24] wire [5:0] next_uop_2_lrs2; // @[rename-stage.scala:123:24] wire [5:0] next_uop_2_lrs1; // @[rename-stage.scala:123:24] wire [5:0] next_uop_2_ldst; // @[rename-stage.scala:123:24] wire next_uop_2_ldst_is_rs1; // @[rename-stage.scala:123:24] wire next_uop_2_flush_on_commit; // @[rename-stage.scala:123:24] wire next_uop_2_is_unique; // @[rename-stage.scala:123:24] wire next_uop_2_is_sys_pc2epc; // @[rename-stage.scala:123:24] wire next_uop_2_uses_stq; // @[rename-stage.scala:123:24] wire next_uop_2_uses_ldq; // @[rename-stage.scala:123:24] wire next_uop_2_is_amo; // @[rename-stage.scala:123:24] wire next_uop_2_is_fencei; // @[rename-stage.scala:123:24] wire next_uop_2_is_fence; // @[rename-stage.scala:123:24] wire next_uop_2_mem_signed; // @[rename-stage.scala:123:24] wire [1:0] next_uop_2_mem_size; // @[rename-stage.scala:123:24] wire [4:0] next_uop_2_mem_cmd; // @[rename-stage.scala:123:24] wire next_uop_2_bypassable; // @[rename-stage.scala:123:24] wire [63:0] next_uop_2_exc_cause; // @[rename-stage.scala:123:24] wire next_uop_2_exception; // @[rename-stage.scala:123:24] wire [6:0] next_uop_2_stale_pdst; // @[rename-stage.scala:123:24] wire next_uop_2_ppred_busy; // @[rename-stage.scala:123:24] wire next_uop_2_prs3_busy; // @[rename-stage.scala:123:24] wire next_uop_2_prs2_busy; // @[rename-stage.scala:123:24] wire next_uop_2_prs1_busy; // @[rename-stage.scala:123:24] wire [4:0] next_uop_2_ppred; // @[rename-stage.scala:123:24] wire [6:0] next_uop_2_prs3; // @[rename-stage.scala:123:24] wire [6:0] next_uop_2_prs2; // @[rename-stage.scala:123:24] wire [6:0] next_uop_2_prs1; // @[rename-stage.scala:123:24] wire [6:0] next_uop_2_pdst; // @[rename-stage.scala:123:24] wire [1:0] next_uop_2_rxq_idx; // @[rename-stage.scala:123:24] wire [4:0] next_uop_2_stq_idx; // @[rename-stage.scala:123:24] wire [4:0] next_uop_2_ldq_idx; // @[rename-stage.scala:123:24] wire [6:0] next_uop_2_rob_idx; // @[rename-stage.scala:123:24] wire [11:0] next_uop_2_csr_addr; // @[rename-stage.scala:123:24] wire [19:0] next_uop_2_imm_packed; // @[rename-stage.scala:123:24] wire next_uop_2_taken; // @[rename-stage.scala:123:24] wire [5:0] next_uop_2_pc_lob; // @[rename-stage.scala:123:24] wire next_uop_2_edge_inst; // @[rename-stage.scala:123:24] wire [4:0] next_uop_2_ftq_idx; // @[rename-stage.scala:123:24] wire [3:0] next_uop_2_br_tag; // @[rename-stage.scala:123:24] wire next_uop_2_is_sfb; // @[rename-stage.scala:123:24] wire next_uop_2_is_jal; // @[rename-stage.scala:123:24] wire next_uop_2_is_jalr; // @[rename-stage.scala:123:24] wire next_uop_2_is_br; // @[rename-stage.scala:123:24] wire next_uop_2_iw_p2_poisoned; // @[rename-stage.scala:123:24] wire next_uop_2_iw_p1_poisoned; // @[rename-stage.scala:123:24] wire [1:0] next_uop_2_iw_state; // @[rename-stage.scala:123:24] wire [9:0] next_uop_2_fu_code; // @[rename-stage.scala:123:24] wire [2:0] next_uop_2_iq_type; // @[rename-stage.scala:123:24] wire [39:0] next_uop_2_debug_pc; // 
@[rename-stage.scala:123:24] wire next_uop_2_is_rvc; // @[rename-stage.scala:123:24] wire [31:0] next_uop_2_debug_inst; // @[rename-stage.scala:123:24] wire [31:0] next_uop_2_inst; // @[rename-stage.scala:123:24] wire [6:0] next_uop_2_uopc; // @[rename-stage.scala:123:24] wire next_uop_2_ctrl_is_std; // @[rename-stage.scala:123:24] wire next_uop_2_ctrl_is_sta; // @[rename-stage.scala:123:24] wire next_uop_2_ctrl_is_load; // @[rename-stage.scala:123:24] wire [2:0] next_uop_2_ctrl_csr_cmd; // @[rename-stage.scala:123:24] wire next_uop_2_ctrl_fcn_dw; // @[rename-stage.scala:123:24] wire [4:0] next_uop_2_ctrl_op_fcn; // @[rename-stage.scala:123:24] wire [2:0] next_uop_2_ctrl_imm_sel; // @[rename-stage.scala:123:24] wire [2:0] next_uop_2_ctrl_op2_sel; // @[rename-stage.scala:123:24] wire [1:0] next_uop_2_ctrl_op1_sel; // @[rename-stage.scala:123:24] wire [3:0] next_uop_2_ctrl_br_type; // @[rename-stage.scala:123:24] wire [1:0] next_uop_1_debug_tsrc; // @[rename-stage.scala:123:24] wire [1:0] next_uop_1_debug_fsrc; // @[rename-stage.scala:123:24] wire next_uop_1_bp_xcpt_if; // @[rename-stage.scala:123:24] wire next_uop_1_bp_debug_if; // @[rename-stage.scala:123:24] wire next_uop_1_xcpt_ma_if; // @[rename-stage.scala:123:24] wire next_uop_1_xcpt_ae_if; // @[rename-stage.scala:123:24] wire next_uop_1_xcpt_pf_if; // @[rename-stage.scala:123:24] wire next_uop_1_fp_single; // @[rename-stage.scala:123:24] wire next_uop_1_fp_val; // @[rename-stage.scala:123:24] wire next_uop_1_frs3_en; // @[rename-stage.scala:123:24] wire [1:0] next_uop_1_lrs2_rtype; // @[rename-stage.scala:123:24] wire [1:0] next_uop_1_lrs1_rtype; // @[rename-stage.scala:123:24] wire [1:0] next_uop_1_dst_rtype; // @[rename-stage.scala:123:24] wire next_uop_1_ldst_val; // @[rename-stage.scala:123:24] wire [5:0] next_uop_1_lrs3; // @[rename-stage.scala:123:24] wire [5:0] next_uop_1_lrs2; // @[rename-stage.scala:123:24] wire [5:0] next_uop_1_lrs1; // @[rename-stage.scala:123:24] wire [5:0] next_uop_1_ldst; // @[rename-stage.scala:123:24] wire next_uop_1_ldst_is_rs1; // @[rename-stage.scala:123:24] wire next_uop_1_flush_on_commit; // @[rename-stage.scala:123:24] wire next_uop_1_is_unique; // @[rename-stage.scala:123:24] wire next_uop_1_is_sys_pc2epc; // @[rename-stage.scala:123:24] wire next_uop_1_uses_stq; // @[rename-stage.scala:123:24] wire next_uop_1_uses_ldq; // @[rename-stage.scala:123:24] wire next_uop_1_is_amo; // @[rename-stage.scala:123:24] wire next_uop_1_is_fencei; // @[rename-stage.scala:123:24] wire next_uop_1_is_fence; // @[rename-stage.scala:123:24] wire next_uop_1_mem_signed; // @[rename-stage.scala:123:24] wire [1:0] next_uop_1_mem_size; // @[rename-stage.scala:123:24] wire [4:0] next_uop_1_mem_cmd; // @[rename-stage.scala:123:24] wire next_uop_1_bypassable; // @[rename-stage.scala:123:24] wire [63:0] next_uop_1_exc_cause; // @[rename-stage.scala:123:24] wire next_uop_1_exception; // @[rename-stage.scala:123:24] wire [6:0] next_uop_1_stale_pdst; // @[rename-stage.scala:123:24] wire next_uop_1_ppred_busy; // @[rename-stage.scala:123:24] wire next_uop_1_prs3_busy; // @[rename-stage.scala:123:24] wire next_uop_1_prs2_busy; // @[rename-stage.scala:123:24] wire next_uop_1_prs1_busy; // @[rename-stage.scala:123:24] wire [4:0] next_uop_1_ppred; // @[rename-stage.scala:123:24] wire [6:0] next_uop_1_prs3; // @[rename-stage.scala:123:24] wire [6:0] next_uop_1_prs2; // @[rename-stage.scala:123:24] wire [6:0] next_uop_1_prs1; // @[rename-stage.scala:123:24] wire [6:0] next_uop_1_pdst; // @[rename-stage.scala:123:24] wire [1:0] 
next_uop_1_rxq_idx; // @[rename-stage.scala:123:24] wire [4:0] next_uop_1_stq_idx; // @[rename-stage.scala:123:24] wire [4:0] next_uop_1_ldq_idx; // @[rename-stage.scala:123:24] wire [6:0] next_uop_1_rob_idx; // @[rename-stage.scala:123:24] wire [11:0] next_uop_1_csr_addr; // @[rename-stage.scala:123:24] wire [19:0] next_uop_1_imm_packed; // @[rename-stage.scala:123:24] wire next_uop_1_taken; // @[rename-stage.scala:123:24] wire [5:0] next_uop_1_pc_lob; // @[rename-stage.scala:123:24] wire next_uop_1_edge_inst; // @[rename-stage.scala:123:24] wire [4:0] next_uop_1_ftq_idx; // @[rename-stage.scala:123:24] wire [3:0] next_uop_1_br_tag; // @[rename-stage.scala:123:24] wire next_uop_1_is_sfb; // @[rename-stage.scala:123:24] wire next_uop_1_is_jal; // @[rename-stage.scala:123:24] wire next_uop_1_is_jalr; // @[rename-stage.scala:123:24] wire next_uop_1_is_br; // @[rename-stage.scala:123:24] wire next_uop_1_iw_p2_poisoned; // @[rename-stage.scala:123:24] wire next_uop_1_iw_p1_poisoned; // @[rename-stage.scala:123:24] wire [1:0] next_uop_1_iw_state; // @[rename-stage.scala:123:24] wire [9:0] next_uop_1_fu_code; // @[rename-stage.scala:123:24] wire [2:0] next_uop_1_iq_type; // @[rename-stage.scala:123:24] wire [39:0] next_uop_1_debug_pc; // @[rename-stage.scala:123:24] wire next_uop_1_is_rvc; // @[rename-stage.scala:123:24] wire [31:0] next_uop_1_debug_inst; // @[rename-stage.scala:123:24] wire [31:0] next_uop_1_inst; // @[rename-stage.scala:123:24] wire [6:0] next_uop_1_uopc; // @[rename-stage.scala:123:24] wire next_uop_1_ctrl_is_std; // @[rename-stage.scala:123:24] wire next_uop_1_ctrl_is_sta; // @[rename-stage.scala:123:24] wire next_uop_1_ctrl_is_load; // @[rename-stage.scala:123:24] wire [2:0] next_uop_1_ctrl_csr_cmd; // @[rename-stage.scala:123:24] wire next_uop_1_ctrl_fcn_dw; // @[rename-stage.scala:123:24] wire [4:0] next_uop_1_ctrl_op_fcn; // @[rename-stage.scala:123:24] wire [2:0] next_uop_1_ctrl_imm_sel; // @[rename-stage.scala:123:24] wire [2:0] next_uop_1_ctrl_op2_sel; // @[rename-stage.scala:123:24] wire [1:0] next_uop_1_ctrl_op1_sel; // @[rename-stage.scala:123:24] wire [3:0] next_uop_1_ctrl_br_type; // @[rename-stage.scala:123:24] wire [1:0] next_uop_debug_tsrc; // @[rename-stage.scala:123:24] wire [1:0] next_uop_debug_fsrc; // @[rename-stage.scala:123:24] wire next_uop_bp_xcpt_if; // @[rename-stage.scala:123:24] wire next_uop_bp_debug_if; // @[rename-stage.scala:123:24] wire next_uop_xcpt_ma_if; // @[rename-stage.scala:123:24] wire next_uop_xcpt_ae_if; // @[rename-stage.scala:123:24] wire next_uop_xcpt_pf_if; // @[rename-stage.scala:123:24] wire next_uop_fp_single; // @[rename-stage.scala:123:24] wire next_uop_fp_val; // @[rename-stage.scala:123:24] wire next_uop_frs3_en; // @[rename-stage.scala:123:24] wire [1:0] next_uop_lrs2_rtype; // @[rename-stage.scala:123:24] wire [1:0] next_uop_lrs1_rtype; // @[rename-stage.scala:123:24] wire [1:0] next_uop_dst_rtype; // @[rename-stage.scala:123:24] wire next_uop_ldst_val; // @[rename-stage.scala:123:24] wire [5:0] next_uop_lrs3; // @[rename-stage.scala:123:24] wire [5:0] next_uop_lrs2; // @[rename-stage.scala:123:24] wire [5:0] next_uop_lrs1; // @[rename-stage.scala:123:24] wire [5:0] next_uop_ldst; // @[rename-stage.scala:123:24] wire next_uop_ldst_is_rs1; // @[rename-stage.scala:123:24] wire next_uop_flush_on_commit; // @[rename-stage.scala:123:24] wire next_uop_is_unique; // @[rename-stage.scala:123:24] wire next_uop_is_sys_pc2epc; // @[rename-stage.scala:123:24] wire next_uop_uses_stq; // @[rename-stage.scala:123:24] wire 
next_uop_uses_ldq; // @[rename-stage.scala:123:24] wire next_uop_is_amo; // @[rename-stage.scala:123:24] wire next_uop_is_fencei; // @[rename-stage.scala:123:24] wire next_uop_is_fence; // @[rename-stage.scala:123:24] wire next_uop_mem_signed; // @[rename-stage.scala:123:24] wire [1:0] next_uop_mem_size; // @[rename-stage.scala:123:24] wire [4:0] next_uop_mem_cmd; // @[rename-stage.scala:123:24] wire next_uop_bypassable; // @[rename-stage.scala:123:24] wire [63:0] next_uop_exc_cause; // @[rename-stage.scala:123:24] wire next_uop_exception; // @[rename-stage.scala:123:24] wire [6:0] next_uop_stale_pdst; // @[rename-stage.scala:123:24] wire next_uop_ppred_busy; // @[rename-stage.scala:123:24] wire next_uop_prs3_busy; // @[rename-stage.scala:123:24] wire next_uop_prs2_busy; // @[rename-stage.scala:123:24] wire next_uop_prs1_busy; // @[rename-stage.scala:123:24] wire [4:0] next_uop_ppred; // @[rename-stage.scala:123:24] wire [6:0] next_uop_prs3; // @[rename-stage.scala:123:24] wire [6:0] next_uop_prs2; // @[rename-stage.scala:123:24] wire [6:0] next_uop_prs1; // @[rename-stage.scala:123:24] wire [6:0] next_uop_pdst; // @[rename-stage.scala:123:24] wire [1:0] next_uop_rxq_idx; // @[rename-stage.scala:123:24] wire [4:0] next_uop_stq_idx; // @[rename-stage.scala:123:24] wire [4:0] next_uop_ldq_idx; // @[rename-stage.scala:123:24] wire [6:0] next_uop_rob_idx; // @[rename-stage.scala:123:24] wire [11:0] next_uop_csr_addr; // @[rename-stage.scala:123:24] wire [19:0] next_uop_imm_packed; // @[rename-stage.scala:123:24] wire next_uop_taken; // @[rename-stage.scala:123:24] wire [5:0] next_uop_pc_lob; // @[rename-stage.scala:123:24] wire next_uop_edge_inst; // @[rename-stage.scala:123:24] wire [4:0] next_uop_ftq_idx; // @[rename-stage.scala:123:24] wire [3:0] next_uop_br_tag; // @[rename-stage.scala:123:24] wire next_uop_is_sfb; // @[rename-stage.scala:123:24] wire next_uop_is_jal; // @[rename-stage.scala:123:24] wire next_uop_is_jalr; // @[rename-stage.scala:123:24] wire next_uop_is_br; // @[rename-stage.scala:123:24] wire next_uop_iw_p2_poisoned; // @[rename-stage.scala:123:24] wire next_uop_iw_p1_poisoned; // @[rename-stage.scala:123:24] wire [1:0] next_uop_iw_state; // @[rename-stage.scala:123:24] wire [9:0] next_uop_fu_code; // @[rename-stage.scala:123:24] wire [2:0] next_uop_iq_type; // @[rename-stage.scala:123:24] wire [39:0] next_uop_debug_pc; // @[rename-stage.scala:123:24] wire next_uop_is_rvc; // @[rename-stage.scala:123:24] wire [31:0] next_uop_debug_inst; // @[rename-stage.scala:123:24] wire [31:0] next_uop_inst; // @[rename-stage.scala:123:24] wire [6:0] next_uop_uopc; // @[rename-stage.scala:123:24] wire next_uop_ctrl_is_std; // @[rename-stage.scala:123:24] wire next_uop_ctrl_is_sta; // @[rename-stage.scala:123:24] wire next_uop_ctrl_is_load; // @[rename-stage.scala:123:24] wire [2:0] next_uop_ctrl_csr_cmd; // @[rename-stage.scala:123:24] wire next_uop_ctrl_fcn_dw; // @[rename-stage.scala:123:24] wire [4:0] next_uop_ctrl_op_fcn; // @[rename-stage.scala:123:24] wire [2:0] next_uop_ctrl_imm_sel; // @[rename-stage.scala:123:24] wire [2:0] next_uop_ctrl_op2_sel; // @[rename-stage.scala:123:24] wire [1:0] next_uop_ctrl_op1_sel; // @[rename-stage.scala:123:24] wire [3:0] next_uop_ctrl_br_type; // @[rename-stage.scala:123:24] wire io_kill_0 = io_kill; // @[rename-stage.scala:356:7] wire io_dec_fire_0_0 = io_dec_fire_0; // @[rename-stage.scala:356:7] wire io_dec_fire_1_0 = io_dec_fire_1; // @[rename-stage.scala:356:7] wire io_dec_fire_2_0 = io_dec_fire_2; // @[rename-stage.scala:356:7] wire [6:0] 
io_dec_uops_0_uopc_0 = io_dec_uops_0_uopc; // @[rename-stage.scala:356:7] wire [31:0] io_dec_uops_0_inst_0 = io_dec_uops_0_inst; // @[rename-stage.scala:356:7] wire [31:0] io_dec_uops_0_debug_inst_0 = io_dec_uops_0_debug_inst; // @[rename-stage.scala:356:7] wire io_dec_uops_0_is_rvc_0 = io_dec_uops_0_is_rvc; // @[rename-stage.scala:356:7] wire [39:0] io_dec_uops_0_debug_pc_0 = io_dec_uops_0_debug_pc; // @[rename-stage.scala:356:7] wire [2:0] io_dec_uops_0_iq_type_0 = io_dec_uops_0_iq_type; // @[rename-stage.scala:356:7] wire [9:0] io_dec_uops_0_fu_code_0 = io_dec_uops_0_fu_code; // @[rename-stage.scala:356:7] wire io_dec_uops_0_is_br_0 = io_dec_uops_0_is_br; // @[rename-stage.scala:356:7] wire io_dec_uops_0_is_jalr_0 = io_dec_uops_0_is_jalr; // @[rename-stage.scala:356:7] wire io_dec_uops_0_is_jal_0 = io_dec_uops_0_is_jal; // @[rename-stage.scala:356:7] wire io_dec_uops_0_is_sfb_0 = io_dec_uops_0_is_sfb; // @[rename-stage.scala:356:7] wire [15:0] io_dec_uops_0_br_mask_0 = io_dec_uops_0_br_mask; // @[rename-stage.scala:356:7] wire [3:0] io_dec_uops_0_br_tag_0 = io_dec_uops_0_br_tag; // @[rename-stage.scala:356:7] wire [4:0] io_dec_uops_0_ftq_idx_0 = io_dec_uops_0_ftq_idx; // @[rename-stage.scala:356:7] wire io_dec_uops_0_edge_inst_0 = io_dec_uops_0_edge_inst; // @[rename-stage.scala:356:7] wire [5:0] io_dec_uops_0_pc_lob_0 = io_dec_uops_0_pc_lob; // @[rename-stage.scala:356:7] wire io_dec_uops_0_taken_0 = io_dec_uops_0_taken; // @[rename-stage.scala:356:7] wire [19:0] io_dec_uops_0_imm_packed_0 = io_dec_uops_0_imm_packed; // @[rename-stage.scala:356:7] wire io_dec_uops_0_exception_0 = io_dec_uops_0_exception; // @[rename-stage.scala:356:7] wire [63:0] io_dec_uops_0_exc_cause_0 = io_dec_uops_0_exc_cause; // @[rename-stage.scala:356:7] wire io_dec_uops_0_bypassable_0 = io_dec_uops_0_bypassable; // @[rename-stage.scala:356:7] wire [4:0] io_dec_uops_0_mem_cmd_0 = io_dec_uops_0_mem_cmd; // @[rename-stage.scala:356:7] wire [1:0] io_dec_uops_0_mem_size_0 = io_dec_uops_0_mem_size; // @[rename-stage.scala:356:7] wire io_dec_uops_0_mem_signed_0 = io_dec_uops_0_mem_signed; // @[rename-stage.scala:356:7] wire io_dec_uops_0_is_fence_0 = io_dec_uops_0_is_fence; // @[rename-stage.scala:356:7] wire io_dec_uops_0_is_fencei_0 = io_dec_uops_0_is_fencei; // @[rename-stage.scala:356:7] wire io_dec_uops_0_is_amo_0 = io_dec_uops_0_is_amo; // @[rename-stage.scala:356:7] wire io_dec_uops_0_uses_ldq_0 = io_dec_uops_0_uses_ldq; // @[rename-stage.scala:356:7] wire io_dec_uops_0_uses_stq_0 = io_dec_uops_0_uses_stq; // @[rename-stage.scala:356:7] wire io_dec_uops_0_is_sys_pc2epc_0 = io_dec_uops_0_is_sys_pc2epc; // @[rename-stage.scala:356:7] wire io_dec_uops_0_is_unique_0 = io_dec_uops_0_is_unique; // @[rename-stage.scala:356:7] wire io_dec_uops_0_flush_on_commit_0 = io_dec_uops_0_flush_on_commit; // @[rename-stage.scala:356:7] wire [5:0] io_dec_uops_0_ldst_0 = io_dec_uops_0_ldst; // @[rename-stage.scala:356:7] wire [5:0] io_dec_uops_0_lrs1_0 = io_dec_uops_0_lrs1; // @[rename-stage.scala:356:7] wire [5:0] io_dec_uops_0_lrs2_0 = io_dec_uops_0_lrs2; // @[rename-stage.scala:356:7] wire [5:0] io_dec_uops_0_lrs3_0 = io_dec_uops_0_lrs3; // @[rename-stage.scala:356:7] wire io_dec_uops_0_ldst_val_0 = io_dec_uops_0_ldst_val; // @[rename-stage.scala:356:7] wire [1:0] io_dec_uops_0_dst_rtype_0 = io_dec_uops_0_dst_rtype; // @[rename-stage.scala:356:7] wire [1:0] io_dec_uops_0_lrs1_rtype_0 = io_dec_uops_0_lrs1_rtype; // @[rename-stage.scala:356:7] wire [1:0] io_dec_uops_0_lrs2_rtype_0 = io_dec_uops_0_lrs2_rtype; // 
@[rename-stage.scala:356:7] wire io_dec_uops_0_frs3_en_0 = io_dec_uops_0_frs3_en; // @[rename-stage.scala:356:7] wire io_dec_uops_0_fp_val_0 = io_dec_uops_0_fp_val; // @[rename-stage.scala:356:7] wire io_dec_uops_0_fp_single_0 = io_dec_uops_0_fp_single; // @[rename-stage.scala:356:7] wire io_dec_uops_0_xcpt_pf_if_0 = io_dec_uops_0_xcpt_pf_if; // @[rename-stage.scala:356:7] wire io_dec_uops_0_xcpt_ae_if_0 = io_dec_uops_0_xcpt_ae_if; // @[rename-stage.scala:356:7] wire io_dec_uops_0_bp_debug_if_0 = io_dec_uops_0_bp_debug_if; // @[rename-stage.scala:356:7] wire io_dec_uops_0_bp_xcpt_if_0 = io_dec_uops_0_bp_xcpt_if; // @[rename-stage.scala:356:7] wire [1:0] io_dec_uops_0_debug_fsrc_0 = io_dec_uops_0_debug_fsrc; // @[rename-stage.scala:356:7] wire [6:0] io_dec_uops_1_uopc_0 = io_dec_uops_1_uopc; // @[rename-stage.scala:356:7] wire [31:0] io_dec_uops_1_inst_0 = io_dec_uops_1_inst; // @[rename-stage.scala:356:7] wire [31:0] io_dec_uops_1_debug_inst_0 = io_dec_uops_1_debug_inst; // @[rename-stage.scala:356:7] wire io_dec_uops_1_is_rvc_0 = io_dec_uops_1_is_rvc; // @[rename-stage.scala:356:7] wire [39:0] io_dec_uops_1_debug_pc_0 = io_dec_uops_1_debug_pc; // @[rename-stage.scala:356:7] wire [2:0] io_dec_uops_1_iq_type_0 = io_dec_uops_1_iq_type; // @[rename-stage.scala:356:7] wire [9:0] io_dec_uops_1_fu_code_0 = io_dec_uops_1_fu_code; // @[rename-stage.scala:356:7] wire io_dec_uops_1_is_br_0 = io_dec_uops_1_is_br; // @[rename-stage.scala:356:7] wire io_dec_uops_1_is_jalr_0 = io_dec_uops_1_is_jalr; // @[rename-stage.scala:356:7] wire io_dec_uops_1_is_jal_0 = io_dec_uops_1_is_jal; // @[rename-stage.scala:356:7] wire io_dec_uops_1_is_sfb_0 = io_dec_uops_1_is_sfb; // @[rename-stage.scala:356:7] wire [15:0] io_dec_uops_1_br_mask_0 = io_dec_uops_1_br_mask; // @[rename-stage.scala:356:7] wire [3:0] io_dec_uops_1_br_tag_0 = io_dec_uops_1_br_tag; // @[rename-stage.scala:356:7] wire [4:0] io_dec_uops_1_ftq_idx_0 = io_dec_uops_1_ftq_idx; // @[rename-stage.scala:356:7] wire io_dec_uops_1_edge_inst_0 = io_dec_uops_1_edge_inst; // @[rename-stage.scala:356:7] wire [5:0] io_dec_uops_1_pc_lob_0 = io_dec_uops_1_pc_lob; // @[rename-stage.scala:356:7] wire io_dec_uops_1_taken_0 = io_dec_uops_1_taken; // @[rename-stage.scala:356:7] wire [19:0] io_dec_uops_1_imm_packed_0 = io_dec_uops_1_imm_packed; // @[rename-stage.scala:356:7] wire io_dec_uops_1_exception_0 = io_dec_uops_1_exception; // @[rename-stage.scala:356:7] wire [63:0] io_dec_uops_1_exc_cause_0 = io_dec_uops_1_exc_cause; // @[rename-stage.scala:356:7] wire io_dec_uops_1_bypassable_0 = io_dec_uops_1_bypassable; // @[rename-stage.scala:356:7] wire [4:0] io_dec_uops_1_mem_cmd_0 = io_dec_uops_1_mem_cmd; // @[rename-stage.scala:356:7] wire [1:0] io_dec_uops_1_mem_size_0 = io_dec_uops_1_mem_size; // @[rename-stage.scala:356:7] wire io_dec_uops_1_mem_signed_0 = io_dec_uops_1_mem_signed; // @[rename-stage.scala:356:7] wire io_dec_uops_1_is_fence_0 = io_dec_uops_1_is_fence; // @[rename-stage.scala:356:7] wire io_dec_uops_1_is_fencei_0 = io_dec_uops_1_is_fencei; // @[rename-stage.scala:356:7] wire io_dec_uops_1_is_amo_0 = io_dec_uops_1_is_amo; // @[rename-stage.scala:356:7] wire io_dec_uops_1_uses_ldq_0 = io_dec_uops_1_uses_ldq; // @[rename-stage.scala:356:7] wire io_dec_uops_1_uses_stq_0 = io_dec_uops_1_uses_stq; // @[rename-stage.scala:356:7] wire io_dec_uops_1_is_sys_pc2epc_0 = io_dec_uops_1_is_sys_pc2epc; // @[rename-stage.scala:356:7] wire io_dec_uops_1_is_unique_0 = io_dec_uops_1_is_unique; // @[rename-stage.scala:356:7] wire io_dec_uops_1_flush_on_commit_0 = 
io_dec_uops_1_flush_on_commit; // @[rename-stage.scala:356:7] wire [5:0] io_dec_uops_1_ldst_0 = io_dec_uops_1_ldst; // @[rename-stage.scala:356:7] wire [5:0] io_dec_uops_1_lrs1_0 = io_dec_uops_1_lrs1; // @[rename-stage.scala:356:7] wire [5:0] io_dec_uops_1_lrs2_0 = io_dec_uops_1_lrs2; // @[rename-stage.scala:356:7] wire [5:0] io_dec_uops_1_lrs3_0 = io_dec_uops_1_lrs3; // @[rename-stage.scala:356:7] wire io_dec_uops_1_ldst_val_0 = io_dec_uops_1_ldst_val; // @[rename-stage.scala:356:7] wire [1:0] io_dec_uops_1_dst_rtype_0 = io_dec_uops_1_dst_rtype; // @[rename-stage.scala:356:7] wire [1:0] io_dec_uops_1_lrs1_rtype_0 = io_dec_uops_1_lrs1_rtype; // @[rename-stage.scala:356:7] wire [1:0] io_dec_uops_1_lrs2_rtype_0 = io_dec_uops_1_lrs2_rtype; // @[rename-stage.scala:356:7] wire io_dec_uops_1_frs3_en_0 = io_dec_uops_1_frs3_en; // @[rename-stage.scala:356:7] wire io_dec_uops_1_fp_val_0 = io_dec_uops_1_fp_val; // @[rename-stage.scala:356:7] wire io_dec_uops_1_fp_single_0 = io_dec_uops_1_fp_single; // @[rename-stage.scala:356:7] wire io_dec_uops_1_xcpt_pf_if_0 = io_dec_uops_1_xcpt_pf_if; // @[rename-stage.scala:356:7] wire io_dec_uops_1_xcpt_ae_if_0 = io_dec_uops_1_xcpt_ae_if; // @[rename-stage.scala:356:7] wire io_dec_uops_1_bp_debug_if_0 = io_dec_uops_1_bp_debug_if; // @[rename-stage.scala:356:7] wire io_dec_uops_1_bp_xcpt_if_0 = io_dec_uops_1_bp_xcpt_if; // @[rename-stage.scala:356:7] wire [1:0] io_dec_uops_1_debug_fsrc_0 = io_dec_uops_1_debug_fsrc; // @[rename-stage.scala:356:7] wire [6:0] io_dec_uops_2_uopc_0 = io_dec_uops_2_uopc; // @[rename-stage.scala:356:7] wire [31:0] io_dec_uops_2_inst_0 = io_dec_uops_2_inst; // @[rename-stage.scala:356:7] wire [31:0] io_dec_uops_2_debug_inst_0 = io_dec_uops_2_debug_inst; // @[rename-stage.scala:356:7] wire io_dec_uops_2_is_rvc_0 = io_dec_uops_2_is_rvc; // @[rename-stage.scala:356:7] wire [39:0] io_dec_uops_2_debug_pc_0 = io_dec_uops_2_debug_pc; // @[rename-stage.scala:356:7] wire [2:0] io_dec_uops_2_iq_type_0 = io_dec_uops_2_iq_type; // @[rename-stage.scala:356:7] wire [9:0] io_dec_uops_2_fu_code_0 = io_dec_uops_2_fu_code; // @[rename-stage.scala:356:7] wire io_dec_uops_2_is_br_0 = io_dec_uops_2_is_br; // @[rename-stage.scala:356:7] wire io_dec_uops_2_is_jalr_0 = io_dec_uops_2_is_jalr; // @[rename-stage.scala:356:7] wire io_dec_uops_2_is_jal_0 = io_dec_uops_2_is_jal; // @[rename-stage.scala:356:7] wire io_dec_uops_2_is_sfb_0 = io_dec_uops_2_is_sfb; // @[rename-stage.scala:356:7] wire [15:0] io_dec_uops_2_br_mask_0 = io_dec_uops_2_br_mask; // @[rename-stage.scala:356:7] wire [3:0] io_dec_uops_2_br_tag_0 = io_dec_uops_2_br_tag; // @[rename-stage.scala:356:7] wire [4:0] io_dec_uops_2_ftq_idx_0 = io_dec_uops_2_ftq_idx; // @[rename-stage.scala:356:7] wire io_dec_uops_2_edge_inst_0 = io_dec_uops_2_edge_inst; // @[rename-stage.scala:356:7] wire [5:0] io_dec_uops_2_pc_lob_0 = io_dec_uops_2_pc_lob; // @[rename-stage.scala:356:7] wire io_dec_uops_2_taken_0 = io_dec_uops_2_taken; // @[rename-stage.scala:356:7] wire [19:0] io_dec_uops_2_imm_packed_0 = io_dec_uops_2_imm_packed; // @[rename-stage.scala:356:7] wire io_dec_uops_2_exception_0 = io_dec_uops_2_exception; // @[rename-stage.scala:356:7] wire [63:0] io_dec_uops_2_exc_cause_0 = io_dec_uops_2_exc_cause; // @[rename-stage.scala:356:7] wire io_dec_uops_2_bypassable_0 = io_dec_uops_2_bypassable; // @[rename-stage.scala:356:7] wire [4:0] io_dec_uops_2_mem_cmd_0 = io_dec_uops_2_mem_cmd; // @[rename-stage.scala:356:7] wire [1:0] io_dec_uops_2_mem_size_0 = io_dec_uops_2_mem_size; // @[rename-stage.scala:356:7] wire 
io_dec_uops_2_mem_signed_0 = io_dec_uops_2_mem_signed; // @[rename-stage.scala:356:7] wire io_dec_uops_2_is_fence_0 = io_dec_uops_2_is_fence; // @[rename-stage.scala:356:7] wire io_dec_uops_2_is_fencei_0 = io_dec_uops_2_is_fencei; // @[rename-stage.scala:356:7] wire io_dec_uops_2_is_amo_0 = io_dec_uops_2_is_amo; // @[rename-stage.scala:356:7] wire io_dec_uops_2_uses_ldq_0 = io_dec_uops_2_uses_ldq; // @[rename-stage.scala:356:7] wire io_dec_uops_2_uses_stq_0 = io_dec_uops_2_uses_stq; // @[rename-stage.scala:356:7] wire io_dec_uops_2_is_sys_pc2epc_0 = io_dec_uops_2_is_sys_pc2epc; // @[rename-stage.scala:356:7] wire io_dec_uops_2_is_unique_0 = io_dec_uops_2_is_unique; // @[rename-stage.scala:356:7] wire io_dec_uops_2_flush_on_commit_0 = io_dec_uops_2_flush_on_commit; // @[rename-stage.scala:356:7] wire [5:0] io_dec_uops_2_ldst_0 = io_dec_uops_2_ldst; // @[rename-stage.scala:356:7] wire [5:0] io_dec_uops_2_lrs1_0 = io_dec_uops_2_lrs1; // @[rename-stage.scala:356:7] wire [5:0] io_dec_uops_2_lrs2_0 = io_dec_uops_2_lrs2; // @[rename-stage.scala:356:7] wire [5:0] io_dec_uops_2_lrs3_0 = io_dec_uops_2_lrs3; // @[rename-stage.scala:356:7] wire io_dec_uops_2_ldst_val_0 = io_dec_uops_2_ldst_val; // @[rename-stage.scala:356:7] wire [1:0] io_dec_uops_2_dst_rtype_0 = io_dec_uops_2_dst_rtype; // @[rename-stage.scala:356:7] wire [1:0] io_dec_uops_2_lrs1_rtype_0 = io_dec_uops_2_lrs1_rtype; // @[rename-stage.scala:356:7] wire [1:0] io_dec_uops_2_lrs2_rtype_0 = io_dec_uops_2_lrs2_rtype; // @[rename-stage.scala:356:7] wire io_dec_uops_2_frs3_en_0 = io_dec_uops_2_frs3_en; // @[rename-stage.scala:356:7] wire io_dec_uops_2_fp_val_0 = io_dec_uops_2_fp_val; // @[rename-stage.scala:356:7] wire io_dec_uops_2_fp_single_0 = io_dec_uops_2_fp_single; // @[rename-stage.scala:356:7] wire io_dec_uops_2_xcpt_pf_if_0 = io_dec_uops_2_xcpt_pf_if; // @[rename-stage.scala:356:7] wire io_dec_uops_2_xcpt_ae_if_0 = io_dec_uops_2_xcpt_ae_if; // @[rename-stage.scala:356:7] wire io_dec_uops_2_bp_debug_if_0 = io_dec_uops_2_bp_debug_if; // @[rename-stage.scala:356:7] wire io_dec_uops_2_bp_xcpt_if_0 = io_dec_uops_2_bp_xcpt_if; // @[rename-stage.scala:356:7] wire [1:0] io_dec_uops_2_debug_fsrc_0 = io_dec_uops_2_debug_fsrc; // @[rename-stage.scala:356:7] wire [15:0] io_brupdate_b1_resolve_mask_0 = io_brupdate_b1_resolve_mask; // @[rename-stage.scala:356:7] wire [15:0] io_brupdate_b1_mispredict_mask_0 = io_brupdate_b1_mispredict_mask; // @[rename-stage.scala:356:7] wire [6:0] io_brupdate_b2_uop_uopc_0 = io_brupdate_b2_uop_uopc; // @[rename-stage.scala:356:7] wire [31:0] io_brupdate_b2_uop_inst_0 = io_brupdate_b2_uop_inst; // @[rename-stage.scala:356:7] wire [31:0] io_brupdate_b2_uop_debug_inst_0 = io_brupdate_b2_uop_debug_inst; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_is_rvc_0 = io_brupdate_b2_uop_is_rvc; // @[rename-stage.scala:356:7] wire [39:0] io_brupdate_b2_uop_debug_pc_0 = io_brupdate_b2_uop_debug_pc; // @[rename-stage.scala:356:7] wire [2:0] io_brupdate_b2_uop_iq_type_0 = io_brupdate_b2_uop_iq_type; // @[rename-stage.scala:356:7] wire [9:0] io_brupdate_b2_uop_fu_code_0 = io_brupdate_b2_uop_fu_code; // @[rename-stage.scala:356:7] wire [3:0] io_brupdate_b2_uop_ctrl_br_type_0 = io_brupdate_b2_uop_ctrl_br_type; // @[rename-stage.scala:356:7] wire [1:0] io_brupdate_b2_uop_ctrl_op1_sel_0 = io_brupdate_b2_uop_ctrl_op1_sel; // @[rename-stage.scala:356:7] wire [2:0] io_brupdate_b2_uop_ctrl_op2_sel_0 = io_brupdate_b2_uop_ctrl_op2_sel; // @[rename-stage.scala:356:7] wire [2:0] io_brupdate_b2_uop_ctrl_imm_sel_0 = 
io_brupdate_b2_uop_ctrl_imm_sel; // @[rename-stage.scala:356:7] wire [4:0] io_brupdate_b2_uop_ctrl_op_fcn_0 = io_brupdate_b2_uop_ctrl_op_fcn; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_ctrl_fcn_dw_0 = io_brupdate_b2_uop_ctrl_fcn_dw; // @[rename-stage.scala:356:7] wire [2:0] io_brupdate_b2_uop_ctrl_csr_cmd_0 = io_brupdate_b2_uop_ctrl_csr_cmd; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_ctrl_is_load_0 = io_brupdate_b2_uop_ctrl_is_load; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_ctrl_is_sta_0 = io_brupdate_b2_uop_ctrl_is_sta; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_ctrl_is_std_0 = io_brupdate_b2_uop_ctrl_is_std; // @[rename-stage.scala:356:7] wire [1:0] io_brupdate_b2_uop_iw_state_0 = io_brupdate_b2_uop_iw_state; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_iw_p1_poisoned_0 = io_brupdate_b2_uop_iw_p1_poisoned; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_iw_p2_poisoned_0 = io_brupdate_b2_uop_iw_p2_poisoned; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_is_br_0 = io_brupdate_b2_uop_is_br; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_is_jalr_0 = io_brupdate_b2_uop_is_jalr; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_is_jal_0 = io_brupdate_b2_uop_is_jal; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_is_sfb_0 = io_brupdate_b2_uop_is_sfb; // @[rename-stage.scala:356:7] wire [15:0] io_brupdate_b2_uop_br_mask_0 = io_brupdate_b2_uop_br_mask; // @[rename-stage.scala:356:7] wire [3:0] io_brupdate_b2_uop_br_tag_0 = io_brupdate_b2_uop_br_tag; // @[rename-stage.scala:356:7] wire [4:0] io_brupdate_b2_uop_ftq_idx_0 = io_brupdate_b2_uop_ftq_idx; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_edge_inst_0 = io_brupdate_b2_uop_edge_inst; // @[rename-stage.scala:356:7] wire [5:0] io_brupdate_b2_uop_pc_lob_0 = io_brupdate_b2_uop_pc_lob; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_taken_0 = io_brupdate_b2_uop_taken; // @[rename-stage.scala:356:7] wire [19:0] io_brupdate_b2_uop_imm_packed_0 = io_brupdate_b2_uop_imm_packed; // @[rename-stage.scala:356:7] wire [11:0] io_brupdate_b2_uop_csr_addr_0 = io_brupdate_b2_uop_csr_addr; // @[rename-stage.scala:356:7] wire [6:0] io_brupdate_b2_uop_rob_idx_0 = io_brupdate_b2_uop_rob_idx; // @[rename-stage.scala:356:7] wire [4:0] io_brupdate_b2_uop_ldq_idx_0 = io_brupdate_b2_uop_ldq_idx; // @[rename-stage.scala:356:7] wire [4:0] io_brupdate_b2_uop_stq_idx_0 = io_brupdate_b2_uop_stq_idx; // @[rename-stage.scala:356:7] wire [1:0] io_brupdate_b2_uop_rxq_idx_0 = io_brupdate_b2_uop_rxq_idx; // @[rename-stage.scala:356:7] wire [6:0] io_brupdate_b2_uop_pdst_0 = io_brupdate_b2_uop_pdst; // @[rename-stage.scala:356:7] wire [6:0] io_brupdate_b2_uop_prs1_0 = io_brupdate_b2_uop_prs1; // @[rename-stage.scala:356:7] wire [6:0] io_brupdate_b2_uop_prs2_0 = io_brupdate_b2_uop_prs2; // @[rename-stage.scala:356:7] wire [6:0] io_brupdate_b2_uop_prs3_0 = io_brupdate_b2_uop_prs3; // @[rename-stage.scala:356:7] wire [4:0] io_brupdate_b2_uop_ppred_0 = io_brupdate_b2_uop_ppred; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_prs1_busy_0 = io_brupdate_b2_uop_prs1_busy; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_prs2_busy_0 = io_brupdate_b2_uop_prs2_busy; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_prs3_busy_0 = io_brupdate_b2_uop_prs3_busy; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_ppred_busy_0 = io_brupdate_b2_uop_ppred_busy; // @[rename-stage.scala:356:7] wire [6:0] io_brupdate_b2_uop_stale_pdst_0 = io_brupdate_b2_uop_stale_pdst; // 
@[rename-stage.scala:356:7] wire io_brupdate_b2_uop_exception_0 = io_brupdate_b2_uop_exception; // @[rename-stage.scala:356:7] wire [63:0] io_brupdate_b2_uop_exc_cause_0 = io_brupdate_b2_uop_exc_cause; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_bypassable_0 = io_brupdate_b2_uop_bypassable; // @[rename-stage.scala:356:7] wire [4:0] io_brupdate_b2_uop_mem_cmd_0 = io_brupdate_b2_uop_mem_cmd; // @[rename-stage.scala:356:7] wire [1:0] io_brupdate_b2_uop_mem_size_0 = io_brupdate_b2_uop_mem_size; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_mem_signed_0 = io_brupdate_b2_uop_mem_signed; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_is_fence_0 = io_brupdate_b2_uop_is_fence; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_is_fencei_0 = io_brupdate_b2_uop_is_fencei; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_is_amo_0 = io_brupdate_b2_uop_is_amo; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_uses_ldq_0 = io_brupdate_b2_uop_uses_ldq; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_uses_stq_0 = io_brupdate_b2_uop_uses_stq; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_is_sys_pc2epc_0 = io_brupdate_b2_uop_is_sys_pc2epc; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_is_unique_0 = io_brupdate_b2_uop_is_unique; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_flush_on_commit_0 = io_brupdate_b2_uop_flush_on_commit; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_ldst_is_rs1_0 = io_brupdate_b2_uop_ldst_is_rs1; // @[rename-stage.scala:356:7] wire [5:0] io_brupdate_b2_uop_ldst_0 = io_brupdate_b2_uop_ldst; // @[rename-stage.scala:356:7] wire [5:0] io_brupdate_b2_uop_lrs1_0 = io_brupdate_b2_uop_lrs1; // @[rename-stage.scala:356:7] wire [5:0] io_brupdate_b2_uop_lrs2_0 = io_brupdate_b2_uop_lrs2; // @[rename-stage.scala:356:7] wire [5:0] io_brupdate_b2_uop_lrs3_0 = io_brupdate_b2_uop_lrs3; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_ldst_val_0 = io_brupdate_b2_uop_ldst_val; // @[rename-stage.scala:356:7] wire [1:0] io_brupdate_b2_uop_dst_rtype_0 = io_brupdate_b2_uop_dst_rtype; // @[rename-stage.scala:356:7] wire [1:0] io_brupdate_b2_uop_lrs1_rtype_0 = io_brupdate_b2_uop_lrs1_rtype; // @[rename-stage.scala:356:7] wire [1:0] io_brupdate_b2_uop_lrs2_rtype_0 = io_brupdate_b2_uop_lrs2_rtype; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_frs3_en_0 = io_brupdate_b2_uop_frs3_en; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_fp_val_0 = io_brupdate_b2_uop_fp_val; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_fp_single_0 = io_brupdate_b2_uop_fp_single; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_xcpt_pf_if_0 = io_brupdate_b2_uop_xcpt_pf_if; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_xcpt_ae_if_0 = io_brupdate_b2_uop_xcpt_ae_if; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_xcpt_ma_if_0 = io_brupdate_b2_uop_xcpt_ma_if; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_bp_debug_if_0 = io_brupdate_b2_uop_bp_debug_if; // @[rename-stage.scala:356:7] wire io_brupdate_b2_uop_bp_xcpt_if_0 = io_brupdate_b2_uop_bp_xcpt_if; // @[rename-stage.scala:356:7] wire [1:0] io_brupdate_b2_uop_debug_fsrc_0 = io_brupdate_b2_uop_debug_fsrc; // @[rename-stage.scala:356:7] wire [1:0] io_brupdate_b2_uop_debug_tsrc_0 = io_brupdate_b2_uop_debug_tsrc; // @[rename-stage.scala:356:7] wire io_brupdate_b2_valid_0 = io_brupdate_b2_valid; // @[rename-stage.scala:356:7] wire io_brupdate_b2_mispredict_0 = io_brupdate_b2_mispredict; // @[rename-stage.scala:356:7] wire io_brupdate_b2_taken_0 = 
io_brupdate_b2_taken; // @[rename-stage.scala:356:7] wire [2:0] io_brupdate_b2_cfi_type_0 = io_brupdate_b2_cfi_type; // @[rename-stage.scala:356:7] wire [1:0] io_brupdate_b2_pc_sel_0 = io_brupdate_b2_pc_sel; // @[rename-stage.scala:356:7] wire [39:0] io_brupdate_b2_jalr_target_0 = io_brupdate_b2_jalr_target; // @[rename-stage.scala:356:7] wire [20:0] io_brupdate_b2_target_offset_0 = io_brupdate_b2_target_offset; // @[rename-stage.scala:356:7] wire io_dis_fire_0_0 = io_dis_fire_0; // @[rename-stage.scala:356:7] wire io_dis_fire_1_0 = io_dis_fire_1; // @[rename-stage.scala:356:7] wire io_dis_fire_2_0 = io_dis_fire_2; // @[rename-stage.scala:356:7] wire io_dis_ready_0 = io_dis_ready; // @[rename-stage.scala:356:7] wire io_com_valids_0_0 = io_com_valids_0; // @[rename-stage.scala:356:7] wire io_com_valids_1_0 = io_com_valids_1; // @[rename-stage.scala:356:7] wire io_com_valids_2_0 = io_com_valids_2; // @[rename-stage.scala:356:7] wire [6:0] io_com_uops_0_uopc_0 = io_com_uops_0_uopc; // @[rename-stage.scala:356:7] wire [31:0] io_com_uops_0_inst_0 = io_com_uops_0_inst; // @[rename-stage.scala:356:7] wire [31:0] io_com_uops_0_debug_inst_0 = io_com_uops_0_debug_inst; // @[rename-stage.scala:356:7] wire io_com_uops_0_is_rvc_0 = io_com_uops_0_is_rvc; // @[rename-stage.scala:356:7] wire [39:0] io_com_uops_0_debug_pc_0 = io_com_uops_0_debug_pc; // @[rename-stage.scala:356:7] wire [2:0] io_com_uops_0_iq_type_0 = io_com_uops_0_iq_type; // @[rename-stage.scala:356:7] wire [9:0] io_com_uops_0_fu_code_0 = io_com_uops_0_fu_code; // @[rename-stage.scala:356:7] wire [3:0] io_com_uops_0_ctrl_br_type_0 = io_com_uops_0_ctrl_br_type; // @[rename-stage.scala:356:7] wire [1:0] io_com_uops_0_ctrl_op1_sel_0 = io_com_uops_0_ctrl_op1_sel; // @[rename-stage.scala:356:7] wire [2:0] io_com_uops_0_ctrl_op2_sel_0 = io_com_uops_0_ctrl_op2_sel; // @[rename-stage.scala:356:7] wire [2:0] io_com_uops_0_ctrl_imm_sel_0 = io_com_uops_0_ctrl_imm_sel; // @[rename-stage.scala:356:7] wire [4:0] io_com_uops_0_ctrl_op_fcn_0 = io_com_uops_0_ctrl_op_fcn; // @[rename-stage.scala:356:7] wire io_com_uops_0_ctrl_fcn_dw_0 = io_com_uops_0_ctrl_fcn_dw; // @[rename-stage.scala:356:7] wire [2:0] io_com_uops_0_ctrl_csr_cmd_0 = io_com_uops_0_ctrl_csr_cmd; // @[rename-stage.scala:356:7] wire io_com_uops_0_ctrl_is_load_0 = io_com_uops_0_ctrl_is_load; // @[rename-stage.scala:356:7] wire io_com_uops_0_ctrl_is_sta_0 = io_com_uops_0_ctrl_is_sta; // @[rename-stage.scala:356:7] wire io_com_uops_0_ctrl_is_std_0 = io_com_uops_0_ctrl_is_std; // @[rename-stage.scala:356:7] wire [1:0] io_com_uops_0_iw_state_0 = io_com_uops_0_iw_state; // @[rename-stage.scala:356:7] wire io_com_uops_0_iw_p1_poisoned_0 = io_com_uops_0_iw_p1_poisoned; // @[rename-stage.scala:356:7] wire io_com_uops_0_iw_p2_poisoned_0 = io_com_uops_0_iw_p2_poisoned; // @[rename-stage.scala:356:7] wire io_com_uops_0_is_br_0 = io_com_uops_0_is_br; // @[rename-stage.scala:356:7] wire io_com_uops_0_is_jalr_0 = io_com_uops_0_is_jalr; // @[rename-stage.scala:356:7] wire io_com_uops_0_is_jal_0 = io_com_uops_0_is_jal; // @[rename-stage.scala:356:7] wire io_com_uops_0_is_sfb_0 = io_com_uops_0_is_sfb; // @[rename-stage.scala:356:7] wire [15:0] io_com_uops_0_br_mask_0 = io_com_uops_0_br_mask; // @[rename-stage.scala:356:7] wire [3:0] io_com_uops_0_br_tag_0 = io_com_uops_0_br_tag; // @[rename-stage.scala:356:7] wire [4:0] io_com_uops_0_ftq_idx_0 = io_com_uops_0_ftq_idx; // @[rename-stage.scala:356:7] wire io_com_uops_0_edge_inst_0 = io_com_uops_0_edge_inst; // @[rename-stage.scala:356:7] wire [5:0] 
io_com_uops_0_pc_lob_0 = io_com_uops_0_pc_lob; // @[rename-stage.scala:356:7] wire io_com_uops_0_taken_0 = io_com_uops_0_taken; // @[rename-stage.scala:356:7] wire [19:0] io_com_uops_0_imm_packed_0 = io_com_uops_0_imm_packed; // @[rename-stage.scala:356:7] wire [11:0] io_com_uops_0_csr_addr_0 = io_com_uops_0_csr_addr; // @[rename-stage.scala:356:7] wire [6:0] io_com_uops_0_rob_idx_0 = io_com_uops_0_rob_idx; // @[rename-stage.scala:356:7] wire [4:0] io_com_uops_0_ldq_idx_0 = io_com_uops_0_ldq_idx; // @[rename-stage.scala:356:7] wire [4:0] io_com_uops_0_stq_idx_0 = io_com_uops_0_stq_idx; // @[rename-stage.scala:356:7] wire [1:0] io_com_uops_0_rxq_idx_0 = io_com_uops_0_rxq_idx; // @[rename-stage.scala:356:7] wire [6:0] io_com_uops_0_pdst_0 = io_com_uops_0_pdst; // @[rename-stage.scala:356:7] wire [6:0] io_com_uops_0_prs1_0 = io_com_uops_0_prs1; // @[rename-stage.scala:356:7] wire [6:0] io_com_uops_0_prs2_0 = io_com_uops_0_prs2; // @[rename-stage.scala:356:7] wire [6:0] io_com_uops_0_prs3_0 = io_com_uops_0_prs3; // @[rename-stage.scala:356:7] wire [4:0] io_com_uops_0_ppred_0 = io_com_uops_0_ppred; // @[rename-stage.scala:356:7] wire io_com_uops_0_prs1_busy_0 = io_com_uops_0_prs1_busy; // @[rename-stage.scala:356:7] wire io_com_uops_0_prs2_busy_0 = io_com_uops_0_prs2_busy; // @[rename-stage.scala:356:7] wire io_com_uops_0_prs3_busy_0 = io_com_uops_0_prs3_busy; // @[rename-stage.scala:356:7] wire io_com_uops_0_ppred_busy_0 = io_com_uops_0_ppred_busy; // @[rename-stage.scala:356:7] wire [6:0] io_com_uops_0_stale_pdst_0 = io_com_uops_0_stale_pdst; // @[rename-stage.scala:356:7] wire io_com_uops_0_exception_0 = io_com_uops_0_exception; // @[rename-stage.scala:356:7] wire [63:0] io_com_uops_0_exc_cause_0 = io_com_uops_0_exc_cause; // @[rename-stage.scala:356:7] wire io_com_uops_0_bypassable_0 = io_com_uops_0_bypassable; // @[rename-stage.scala:356:7] wire [4:0] io_com_uops_0_mem_cmd_0 = io_com_uops_0_mem_cmd; // @[rename-stage.scala:356:7] wire [1:0] io_com_uops_0_mem_size_0 = io_com_uops_0_mem_size; // @[rename-stage.scala:356:7] wire io_com_uops_0_mem_signed_0 = io_com_uops_0_mem_signed; // @[rename-stage.scala:356:7] wire io_com_uops_0_is_fence_0 = io_com_uops_0_is_fence; // @[rename-stage.scala:356:7] wire io_com_uops_0_is_fencei_0 = io_com_uops_0_is_fencei; // @[rename-stage.scala:356:7] wire io_com_uops_0_is_amo_0 = io_com_uops_0_is_amo; // @[rename-stage.scala:356:7] wire io_com_uops_0_uses_ldq_0 = io_com_uops_0_uses_ldq; // @[rename-stage.scala:356:7] wire io_com_uops_0_uses_stq_0 = io_com_uops_0_uses_stq; // @[rename-stage.scala:356:7] wire io_com_uops_0_is_sys_pc2epc_0 = io_com_uops_0_is_sys_pc2epc; // @[rename-stage.scala:356:7] wire io_com_uops_0_is_unique_0 = io_com_uops_0_is_unique; // @[rename-stage.scala:356:7] wire io_com_uops_0_flush_on_commit_0 = io_com_uops_0_flush_on_commit; // @[rename-stage.scala:356:7] wire io_com_uops_0_ldst_is_rs1_0 = io_com_uops_0_ldst_is_rs1; // @[rename-stage.scala:356:7] wire [5:0] io_com_uops_0_ldst_0 = io_com_uops_0_ldst; // @[rename-stage.scala:356:7] wire [5:0] io_com_uops_0_lrs1_0 = io_com_uops_0_lrs1; // @[rename-stage.scala:356:7] wire [5:0] io_com_uops_0_lrs2_0 = io_com_uops_0_lrs2; // @[rename-stage.scala:356:7] wire [5:0] io_com_uops_0_lrs3_0 = io_com_uops_0_lrs3; // @[rename-stage.scala:356:7] wire io_com_uops_0_ldst_val_0 = io_com_uops_0_ldst_val; // @[rename-stage.scala:356:7] wire [1:0] io_com_uops_0_dst_rtype_0 = io_com_uops_0_dst_rtype; // @[rename-stage.scala:356:7] wire [1:0] io_com_uops_0_lrs1_rtype_0 = io_com_uops_0_lrs1_rtype; // 
@[rename-stage.scala:356:7] wire [1:0] io_com_uops_0_lrs2_rtype_0 = io_com_uops_0_lrs2_rtype; // @[rename-stage.scala:356:7] wire io_com_uops_0_frs3_en_0 = io_com_uops_0_frs3_en; // @[rename-stage.scala:356:7] wire io_com_uops_0_fp_val_0 = io_com_uops_0_fp_val; // @[rename-stage.scala:356:7] wire io_com_uops_0_fp_single_0 = io_com_uops_0_fp_single; // @[rename-stage.scala:356:7] wire io_com_uops_0_xcpt_pf_if_0 = io_com_uops_0_xcpt_pf_if; // @[rename-stage.scala:356:7] wire io_com_uops_0_xcpt_ae_if_0 = io_com_uops_0_xcpt_ae_if; // @[rename-stage.scala:356:7] wire io_com_uops_0_xcpt_ma_if_0 = io_com_uops_0_xcpt_ma_if; // @[rename-stage.scala:356:7] wire io_com_uops_0_bp_debug_if_0 = io_com_uops_0_bp_debug_if; // @[rename-stage.scala:356:7] wire io_com_uops_0_bp_xcpt_if_0 = io_com_uops_0_bp_xcpt_if; // @[rename-stage.scala:356:7] wire [1:0] io_com_uops_0_debug_fsrc_0 = io_com_uops_0_debug_fsrc; // @[rename-stage.scala:356:7] wire [1:0] io_com_uops_0_debug_tsrc_0 = io_com_uops_0_debug_tsrc; // @[rename-stage.scala:356:7] wire [6:0] io_com_uops_1_uopc_0 = io_com_uops_1_uopc; // @[rename-stage.scala:356:7] wire [31:0] io_com_uops_1_inst_0 = io_com_uops_1_inst; // @[rename-stage.scala:356:7] wire [31:0] io_com_uops_1_debug_inst_0 = io_com_uops_1_debug_inst; // @[rename-stage.scala:356:7] wire io_com_uops_1_is_rvc_0 = io_com_uops_1_is_rvc; // @[rename-stage.scala:356:7] wire [39:0] io_com_uops_1_debug_pc_0 = io_com_uops_1_debug_pc; // @[rename-stage.scala:356:7] wire [2:0] io_com_uops_1_iq_type_0 = io_com_uops_1_iq_type; // @[rename-stage.scala:356:7] wire [9:0] io_com_uops_1_fu_code_0 = io_com_uops_1_fu_code; // @[rename-stage.scala:356:7] wire [3:0] io_com_uops_1_ctrl_br_type_0 = io_com_uops_1_ctrl_br_type; // @[rename-stage.scala:356:7] wire [1:0] io_com_uops_1_ctrl_op1_sel_0 = io_com_uops_1_ctrl_op1_sel; // @[rename-stage.scala:356:7] wire [2:0] io_com_uops_1_ctrl_op2_sel_0 = io_com_uops_1_ctrl_op2_sel; // @[rename-stage.scala:356:7] wire [2:0] io_com_uops_1_ctrl_imm_sel_0 = io_com_uops_1_ctrl_imm_sel; // @[rename-stage.scala:356:7] wire [4:0] io_com_uops_1_ctrl_op_fcn_0 = io_com_uops_1_ctrl_op_fcn; // @[rename-stage.scala:356:7] wire io_com_uops_1_ctrl_fcn_dw_0 = io_com_uops_1_ctrl_fcn_dw; // @[rename-stage.scala:356:7] wire [2:0] io_com_uops_1_ctrl_csr_cmd_0 = io_com_uops_1_ctrl_csr_cmd; // @[rename-stage.scala:356:7] wire io_com_uops_1_ctrl_is_load_0 = io_com_uops_1_ctrl_is_load; // @[rename-stage.scala:356:7] wire io_com_uops_1_ctrl_is_sta_0 = io_com_uops_1_ctrl_is_sta; // @[rename-stage.scala:356:7] wire io_com_uops_1_ctrl_is_std_0 = io_com_uops_1_ctrl_is_std; // @[rename-stage.scala:356:7] wire [1:0] io_com_uops_1_iw_state_0 = io_com_uops_1_iw_state; // @[rename-stage.scala:356:7] wire io_com_uops_1_iw_p1_poisoned_0 = io_com_uops_1_iw_p1_poisoned; // @[rename-stage.scala:356:7] wire io_com_uops_1_iw_p2_poisoned_0 = io_com_uops_1_iw_p2_poisoned; // @[rename-stage.scala:356:7] wire io_com_uops_1_is_br_0 = io_com_uops_1_is_br; // @[rename-stage.scala:356:7] wire io_com_uops_1_is_jalr_0 = io_com_uops_1_is_jalr; // @[rename-stage.scala:356:7] wire io_com_uops_1_is_jal_0 = io_com_uops_1_is_jal; // @[rename-stage.scala:356:7] wire io_com_uops_1_is_sfb_0 = io_com_uops_1_is_sfb; // @[rename-stage.scala:356:7] wire [15:0] io_com_uops_1_br_mask_0 = io_com_uops_1_br_mask; // @[rename-stage.scala:356:7] wire [3:0] io_com_uops_1_br_tag_0 = io_com_uops_1_br_tag; // @[rename-stage.scala:356:7] wire [4:0] io_com_uops_1_ftq_idx_0 = io_com_uops_1_ftq_idx; // @[rename-stage.scala:356:7] wire 
io_com_uops_1_edge_inst_0 = io_com_uops_1_edge_inst; // @[rename-stage.scala:356:7] wire [5:0] io_com_uops_1_pc_lob_0 = io_com_uops_1_pc_lob; // @[rename-stage.scala:356:7] wire io_com_uops_1_taken_0 = io_com_uops_1_taken; // @[rename-stage.scala:356:7] wire [19:0] io_com_uops_1_imm_packed_0 = io_com_uops_1_imm_packed; // @[rename-stage.scala:356:7] wire [11:0] io_com_uops_1_csr_addr_0 = io_com_uops_1_csr_addr; // @[rename-stage.scala:356:7] wire [6:0] io_com_uops_1_rob_idx_0 = io_com_uops_1_rob_idx; // @[rename-stage.scala:356:7] wire [4:0] io_com_uops_1_ldq_idx_0 = io_com_uops_1_ldq_idx; // @[rename-stage.scala:356:7] wire [4:0] io_com_uops_1_stq_idx_0 = io_com_uops_1_stq_idx; // @[rename-stage.scala:356:7] wire [1:0] io_com_uops_1_rxq_idx_0 = io_com_uops_1_rxq_idx; // @[rename-stage.scala:356:7] wire [6:0] io_com_uops_1_pdst_0 = io_com_uops_1_pdst; // @[rename-stage.scala:356:7] wire [6:0] io_com_uops_1_prs1_0 = io_com_uops_1_prs1; // @[rename-stage.scala:356:7] wire [6:0] io_com_uops_1_prs2_0 = io_com_uops_1_prs2; // @[rename-stage.scala:356:7] wire [6:0] io_com_uops_1_prs3_0 = io_com_uops_1_prs3; // @[rename-stage.scala:356:7] wire [4:0] io_com_uops_1_ppred_0 = io_com_uops_1_ppred; // @[rename-stage.scala:356:7] wire io_com_uops_1_prs1_busy_0 = io_com_uops_1_prs1_busy; // @[rename-stage.scala:356:7] wire io_com_uops_1_prs2_busy_0 = io_com_uops_1_prs2_busy; // @[rename-stage.scala:356:7] wire io_com_uops_1_prs3_busy_0 = io_com_uops_1_prs3_busy; // @[rename-stage.scala:356:7] wire io_com_uops_1_ppred_busy_0 = io_com_uops_1_ppred_busy; // @[rename-stage.scala:356:7] wire [6:0] io_com_uops_1_stale_pdst_0 = io_com_uops_1_stale_pdst; // @[rename-stage.scala:356:7] wire io_com_uops_1_exception_0 = io_com_uops_1_exception; // @[rename-stage.scala:356:7] wire [63:0] io_com_uops_1_exc_cause_0 = io_com_uops_1_exc_cause; // @[rename-stage.scala:356:7] wire io_com_uops_1_bypassable_0 = io_com_uops_1_bypassable; // @[rename-stage.scala:356:7] wire [4:0] io_com_uops_1_mem_cmd_0 = io_com_uops_1_mem_cmd; // @[rename-stage.scala:356:7] wire [1:0] io_com_uops_1_mem_size_0 = io_com_uops_1_mem_size; // @[rename-stage.scala:356:7] wire io_com_uops_1_mem_signed_0 = io_com_uops_1_mem_signed; // @[rename-stage.scala:356:7] wire io_com_uops_1_is_fence_0 = io_com_uops_1_is_fence; // @[rename-stage.scala:356:7] wire io_com_uops_1_is_fencei_0 = io_com_uops_1_is_fencei; // @[rename-stage.scala:356:7] wire io_com_uops_1_is_amo_0 = io_com_uops_1_is_amo; // @[rename-stage.scala:356:7] wire io_com_uops_1_uses_ldq_0 = io_com_uops_1_uses_ldq; // @[rename-stage.scala:356:7] wire io_com_uops_1_uses_stq_0 = io_com_uops_1_uses_stq; // @[rename-stage.scala:356:7] wire io_com_uops_1_is_sys_pc2epc_0 = io_com_uops_1_is_sys_pc2epc; // @[rename-stage.scala:356:7] wire io_com_uops_1_is_unique_0 = io_com_uops_1_is_unique; // @[rename-stage.scala:356:7] wire io_com_uops_1_flush_on_commit_0 = io_com_uops_1_flush_on_commit; // @[rename-stage.scala:356:7] wire io_com_uops_1_ldst_is_rs1_0 = io_com_uops_1_ldst_is_rs1; // @[rename-stage.scala:356:7] wire [5:0] io_com_uops_1_ldst_0 = io_com_uops_1_ldst; // @[rename-stage.scala:356:7] wire [5:0] io_com_uops_1_lrs1_0 = io_com_uops_1_lrs1; // @[rename-stage.scala:356:7] wire [5:0] io_com_uops_1_lrs2_0 = io_com_uops_1_lrs2; // @[rename-stage.scala:356:7] wire [5:0] io_com_uops_1_lrs3_0 = io_com_uops_1_lrs3; // @[rename-stage.scala:356:7] wire io_com_uops_1_ldst_val_0 = io_com_uops_1_ldst_val; // @[rename-stage.scala:356:7] wire [1:0] io_com_uops_1_dst_rtype_0 = io_com_uops_1_dst_rtype; // 
@[rename-stage.scala:356:7] wire [1:0] io_com_uops_1_lrs1_rtype_0 = io_com_uops_1_lrs1_rtype; // @[rename-stage.scala:356:7] wire [1:0] io_com_uops_1_lrs2_rtype_0 = io_com_uops_1_lrs2_rtype; // @[rename-stage.scala:356:7] wire io_com_uops_1_frs3_en_0 = io_com_uops_1_frs3_en; // @[rename-stage.scala:356:7] wire io_com_uops_1_fp_val_0 = io_com_uops_1_fp_val; // @[rename-stage.scala:356:7] wire io_com_uops_1_fp_single_0 = io_com_uops_1_fp_single; // @[rename-stage.scala:356:7] wire io_com_uops_1_xcpt_pf_if_0 = io_com_uops_1_xcpt_pf_if; // @[rename-stage.scala:356:7] wire io_com_uops_1_xcpt_ae_if_0 = io_com_uops_1_xcpt_ae_if; // @[rename-stage.scala:356:7] wire io_com_uops_1_xcpt_ma_if_0 = io_com_uops_1_xcpt_ma_if; // @[rename-stage.scala:356:7] wire io_com_uops_1_bp_debug_if_0 = io_com_uops_1_bp_debug_if; // @[rename-stage.scala:356:7] wire io_com_uops_1_bp_xcpt_if_0 = io_com_uops_1_bp_xcpt_if; // @[rename-stage.scala:356:7] wire [1:0] io_com_uops_1_debug_fsrc_0 = io_com_uops_1_debug_fsrc; // @[rename-stage.scala:356:7] wire [1:0] io_com_uops_1_debug_tsrc_0 = io_com_uops_1_debug_tsrc; // @[rename-stage.scala:356:7] wire [6:0] io_com_uops_2_uopc_0 = io_com_uops_2_uopc; // @[rename-stage.scala:356:7] wire [31:0] io_com_uops_2_inst_0 = io_com_uops_2_inst; // @[rename-stage.scala:356:7] wire [31:0] io_com_uops_2_debug_inst_0 = io_com_uops_2_debug_inst; // @[rename-stage.scala:356:7] wire io_com_uops_2_is_rvc_0 = io_com_uops_2_is_rvc; // @[rename-stage.scala:356:7] wire [39:0] io_com_uops_2_debug_pc_0 = io_com_uops_2_debug_pc; // @[rename-stage.scala:356:7] wire [2:0] io_com_uops_2_iq_type_0 = io_com_uops_2_iq_type; // @[rename-stage.scala:356:7] wire [9:0] io_com_uops_2_fu_code_0 = io_com_uops_2_fu_code; // @[rename-stage.scala:356:7] wire [3:0] io_com_uops_2_ctrl_br_type_0 = io_com_uops_2_ctrl_br_type; // @[rename-stage.scala:356:7] wire [1:0] io_com_uops_2_ctrl_op1_sel_0 = io_com_uops_2_ctrl_op1_sel; // @[rename-stage.scala:356:7] wire [2:0] io_com_uops_2_ctrl_op2_sel_0 = io_com_uops_2_ctrl_op2_sel; // @[rename-stage.scala:356:7] wire [2:0] io_com_uops_2_ctrl_imm_sel_0 = io_com_uops_2_ctrl_imm_sel; // @[rename-stage.scala:356:7] wire [4:0] io_com_uops_2_ctrl_op_fcn_0 = io_com_uops_2_ctrl_op_fcn; // @[rename-stage.scala:356:7] wire io_com_uops_2_ctrl_fcn_dw_0 = io_com_uops_2_ctrl_fcn_dw; // @[rename-stage.scala:356:7] wire [2:0] io_com_uops_2_ctrl_csr_cmd_0 = io_com_uops_2_ctrl_csr_cmd; // @[rename-stage.scala:356:7] wire io_com_uops_2_ctrl_is_load_0 = io_com_uops_2_ctrl_is_load; // @[rename-stage.scala:356:7] wire io_com_uops_2_ctrl_is_sta_0 = io_com_uops_2_ctrl_is_sta; // @[rename-stage.scala:356:7] wire io_com_uops_2_ctrl_is_std_0 = io_com_uops_2_ctrl_is_std; // @[rename-stage.scala:356:7] wire [1:0] io_com_uops_2_iw_state_0 = io_com_uops_2_iw_state; // @[rename-stage.scala:356:7] wire io_com_uops_2_iw_p1_poisoned_0 = io_com_uops_2_iw_p1_poisoned; // @[rename-stage.scala:356:7] wire io_com_uops_2_iw_p2_poisoned_0 = io_com_uops_2_iw_p2_poisoned; // @[rename-stage.scala:356:7] wire io_com_uops_2_is_br_0 = io_com_uops_2_is_br; // @[rename-stage.scala:356:7] wire io_com_uops_2_is_jalr_0 = io_com_uops_2_is_jalr; // @[rename-stage.scala:356:7] wire io_com_uops_2_is_jal_0 = io_com_uops_2_is_jal; // @[rename-stage.scala:356:7] wire io_com_uops_2_is_sfb_0 = io_com_uops_2_is_sfb; // @[rename-stage.scala:356:7] wire [15:0] io_com_uops_2_br_mask_0 = io_com_uops_2_br_mask; // @[rename-stage.scala:356:7] wire [3:0] io_com_uops_2_br_tag_0 = io_com_uops_2_br_tag; // @[rename-stage.scala:356:7] wire [4:0] 
io_com_uops_2_ftq_idx_0 = io_com_uops_2_ftq_idx; // @[rename-stage.scala:356:7] wire io_com_uops_2_edge_inst_0 = io_com_uops_2_edge_inst; // @[rename-stage.scala:356:7] wire [5:0] io_com_uops_2_pc_lob_0 = io_com_uops_2_pc_lob; // @[rename-stage.scala:356:7] wire io_com_uops_2_taken_0 = io_com_uops_2_taken; // @[rename-stage.scala:356:7] wire [19:0] io_com_uops_2_imm_packed_0 = io_com_uops_2_imm_packed; // @[rename-stage.scala:356:7] wire [11:0] io_com_uops_2_csr_addr_0 = io_com_uops_2_csr_addr; // @[rename-stage.scala:356:7] wire [6:0] io_com_uops_2_rob_idx_0 = io_com_uops_2_rob_idx; // @[rename-stage.scala:356:7] wire [4:0] io_com_uops_2_ldq_idx_0 = io_com_uops_2_ldq_idx; // @[rename-stage.scala:356:7] wire [4:0] io_com_uops_2_stq_idx_0 = io_com_uops_2_stq_idx; // @[rename-stage.scala:356:7] wire [1:0] io_com_uops_2_rxq_idx_0 = io_com_uops_2_rxq_idx; // @[rename-stage.scala:356:7] wire [6:0] io_com_uops_2_pdst_0 = io_com_uops_2_pdst; // @[rename-stage.scala:356:7] wire [6:0] io_com_uops_2_prs1_0 = io_com_uops_2_prs1; // @[rename-stage.scala:356:7] wire [6:0] io_com_uops_2_prs2_0 = io_com_uops_2_prs2; // @[rename-stage.scala:356:7] wire [6:0] io_com_uops_2_prs3_0 = io_com_uops_2_prs3; // @[rename-stage.scala:356:7] wire [4:0] io_com_uops_2_ppred_0 = io_com_uops_2_ppred; // @[rename-stage.scala:356:7] wire io_com_uops_2_prs1_busy_0 = io_com_uops_2_prs1_busy; // @[rename-stage.scala:356:7] wire io_com_uops_2_prs2_busy_0 = io_com_uops_2_prs2_busy; // @[rename-stage.scala:356:7] wire io_com_uops_2_prs3_busy_0 = io_com_uops_2_prs3_busy; // @[rename-stage.scala:356:7] wire io_com_uops_2_ppred_busy_0 = io_com_uops_2_ppred_busy; // @[rename-stage.scala:356:7] wire [6:0] io_com_uops_2_stale_pdst_0 = io_com_uops_2_stale_pdst; // @[rename-stage.scala:356:7] wire io_com_uops_2_exception_0 = io_com_uops_2_exception; // @[rename-stage.scala:356:7] wire [63:0] io_com_uops_2_exc_cause_0 = io_com_uops_2_exc_cause; // @[rename-stage.scala:356:7] wire io_com_uops_2_bypassable_0 = io_com_uops_2_bypassable; // @[rename-stage.scala:356:7] wire [4:0] io_com_uops_2_mem_cmd_0 = io_com_uops_2_mem_cmd; // @[rename-stage.scala:356:7] wire [1:0] io_com_uops_2_mem_size_0 = io_com_uops_2_mem_size; // @[rename-stage.scala:356:7] wire io_com_uops_2_mem_signed_0 = io_com_uops_2_mem_signed; // @[rename-stage.scala:356:7] wire io_com_uops_2_is_fence_0 = io_com_uops_2_is_fence; // @[rename-stage.scala:356:7] wire io_com_uops_2_is_fencei_0 = io_com_uops_2_is_fencei; // @[rename-stage.scala:356:7] wire io_com_uops_2_is_amo_0 = io_com_uops_2_is_amo; // @[rename-stage.scala:356:7] wire io_com_uops_2_uses_ldq_0 = io_com_uops_2_uses_ldq; // @[rename-stage.scala:356:7] wire io_com_uops_2_uses_stq_0 = io_com_uops_2_uses_stq; // @[rename-stage.scala:356:7] wire io_com_uops_2_is_sys_pc2epc_0 = io_com_uops_2_is_sys_pc2epc; // @[rename-stage.scala:356:7] wire io_com_uops_2_is_unique_0 = io_com_uops_2_is_unique; // @[rename-stage.scala:356:7] wire io_com_uops_2_flush_on_commit_0 = io_com_uops_2_flush_on_commit; // @[rename-stage.scala:356:7] wire io_com_uops_2_ldst_is_rs1_0 = io_com_uops_2_ldst_is_rs1; // @[rename-stage.scala:356:7] wire [5:0] io_com_uops_2_ldst_0 = io_com_uops_2_ldst; // @[rename-stage.scala:356:7] wire [5:0] io_com_uops_2_lrs1_0 = io_com_uops_2_lrs1; // @[rename-stage.scala:356:7] wire [5:0] io_com_uops_2_lrs2_0 = io_com_uops_2_lrs2; // @[rename-stage.scala:356:7] wire [5:0] io_com_uops_2_lrs3_0 = io_com_uops_2_lrs3; // @[rename-stage.scala:356:7] wire io_com_uops_2_ldst_val_0 = io_com_uops_2_ldst_val; // 
@[rename-stage.scala:356:7] wire [1:0] io_com_uops_2_dst_rtype_0 = io_com_uops_2_dst_rtype; // @[rename-stage.scala:356:7] wire [1:0] io_com_uops_2_lrs1_rtype_0 = io_com_uops_2_lrs1_rtype; // @[rename-stage.scala:356:7] wire [1:0] io_com_uops_2_lrs2_rtype_0 = io_com_uops_2_lrs2_rtype; // @[rename-stage.scala:356:7] wire io_com_uops_2_frs3_en_0 = io_com_uops_2_frs3_en; // @[rename-stage.scala:356:7] wire io_com_uops_2_fp_val_0 = io_com_uops_2_fp_val; // @[rename-stage.scala:356:7] wire io_com_uops_2_fp_single_0 = io_com_uops_2_fp_single; // @[rename-stage.scala:356:7] wire io_com_uops_2_xcpt_pf_if_0 = io_com_uops_2_xcpt_pf_if; // @[rename-stage.scala:356:7] wire io_com_uops_2_xcpt_ae_if_0 = io_com_uops_2_xcpt_ae_if; // @[rename-stage.scala:356:7] wire io_com_uops_2_xcpt_ma_if_0 = io_com_uops_2_xcpt_ma_if; // @[rename-stage.scala:356:7] wire io_com_uops_2_bp_debug_if_0 = io_com_uops_2_bp_debug_if; // @[rename-stage.scala:356:7] wire io_com_uops_2_bp_xcpt_if_0 = io_com_uops_2_bp_xcpt_if; // @[rename-stage.scala:356:7] wire [1:0] io_com_uops_2_debug_fsrc_0 = io_com_uops_2_debug_fsrc; // @[rename-stage.scala:356:7] wire [1:0] io_com_uops_2_debug_tsrc_0 = io_com_uops_2_debug_tsrc; // @[rename-stage.scala:356:7] wire io_rbk_valids_0_0 = io_rbk_valids_0; // @[rename-stage.scala:356:7] wire io_rbk_valids_1_0 = io_rbk_valids_1; // @[rename-stage.scala:356:7] wire io_rbk_valids_2_0 = io_rbk_valids_2; // @[rename-stage.scala:356:7] wire io_rollback_0 = io_rollback; // @[rename-stage.scala:356:7] wire io_debug_rob_empty_0 = io_debug_rob_empty; // @[rename-stage.scala:356:7] wire [7:0] lo_lo_1 = 8'h0; // @[rename-stage.scala:402:{47,65}] wire [7:0] lo_hi_1 = 8'h0; // @[rename-stage.scala:402:{47,65}] wire [7:0] hi_lo_1 = 8'h0; // @[rename-stage.scala:402:{47,65}] wire [7:0] hi_hi_1 = 8'h0; // @[rename-stage.scala:402:{47,65}] wire [7:0] lo_lo_2 = 8'h0; // @[rename-stage.scala:402:{47,65}] wire [7:0] lo_hi_2 = 8'h0; // @[rename-stage.scala:402:{47,65}] wire [7:0] hi_lo_2 = 8'h0; // @[rename-stage.scala:402:{47,65}] wire [7:0] hi_hi_2 = 8'h0; // @[rename-stage.scala:402:{47,65}] wire [63:0] io_wakeups_0_bits_uop_exc_cause = 64'h0; // @[rename-stage.scala:60:14, :356:7] wire [63:0] io_wakeups_0_bits_data = 64'h0; // @[rename-stage.scala:60:14, :356:7] wire [63:0] io_wakeups_0_bits_fflags_bits_uop_exc_cause = 64'h0; // @[rename-stage.scala:60:14, :356:7] wire [19:0] io_wakeups_0_bits_uop_imm_packed = 20'h0; // @[rename-stage.scala:60:14, :356:7] wire [19:0] io_wakeups_0_bits_fflags_bits_uop_imm_packed = 20'h0; // @[rename-stage.scala:60:14, :356:7] wire [5:0] io_wakeups_0_bits_uop_pc_lob = 6'h0; // @[rename-stage.scala:60:14, :356:7] wire [5:0] io_wakeups_0_bits_uop_ldst = 6'h0; // @[rename-stage.scala:60:14, :356:7] wire [5:0] io_wakeups_0_bits_uop_lrs1 = 6'h0; // @[rename-stage.scala:60:14, :356:7] wire [5:0] io_wakeups_0_bits_uop_lrs2 = 6'h0; // @[rename-stage.scala:60:14, :356:7] wire [5:0] io_wakeups_0_bits_uop_lrs3 = 6'h0; // @[rename-stage.scala:60:14, :356:7] wire [5:0] io_wakeups_0_bits_fflags_bits_uop_pc_lob = 6'h0; // @[rename-stage.scala:60:14, :356:7] wire [5:0] io_wakeups_0_bits_fflags_bits_uop_ldst = 6'h0; // @[rename-stage.scala:60:14, :356:7] wire [5:0] io_wakeups_0_bits_fflags_bits_uop_lrs1 = 6'h0; // @[rename-stage.scala:60:14, :356:7] wire [5:0] io_wakeups_0_bits_fflags_bits_uop_lrs2 = 6'h0; // @[rename-stage.scala:60:14, :356:7] wire [5:0] io_wakeups_0_bits_fflags_bits_uop_lrs3 = 6'h0; // @[rename-stage.scala:60:14, :356:7] wire [15:0] io_wakeups_0_bits_uop_br_mask = 16'h0; // 
@[rename-stage.scala:60:14, :356:7, :402:{47,65}] wire [15:0] io_wakeups_0_bits_fflags_bits_uop_br_mask = 16'h0; // @[rename-stage.scala:60:14, :356:7, :402:{47,65}] wire [15:0] lo_1 = 16'h0; // @[rename-stage.scala:60:14, :356:7, :402:{47,65}] wire [15:0] hi_1 = 16'h0; // @[rename-stage.scala:60:14, :356:7, :402:{47,65}] wire [15:0] lo_2 = 16'h0; // @[rename-stage.scala:60:14, :356:7, :402:{47,65}] wire [15:0] hi_2 = 16'h0; // @[rename-stage.scala:60:14, :356:7, :402:{47,65}] wire [9:0] io_wakeups_0_bits_uop_fu_code = 10'h0; // @[rename-stage.scala:60:14, :356:7] wire [9:0] io_wakeups_0_bits_fflags_bits_uop_fu_code = 10'h0; // @[rename-stage.scala:60:14, :356:7] wire [39:0] io_wakeups_0_bits_uop_debug_pc = 40'h0; // @[rename-stage.scala:60:14, :356:7] wire [39:0] io_wakeups_0_bits_fflags_bits_uop_debug_pc = 40'h0; // @[rename-stage.scala:60:14, :356:7] wire [31:0] io_wakeups_0_bits_uop_inst = 32'h0; // @[rename-stage.scala:356:7] wire [31:0] io_wakeups_0_bits_uop_debug_inst = 32'h0; // @[rename-stage.scala:356:7] wire [31:0] io_wakeups_0_bits_fflags_bits_uop_inst = 32'h0; // @[rename-stage.scala:356:7] wire [31:0] io_wakeups_0_bits_fflags_bits_uop_debug_inst = 32'h0; // @[rename-stage.scala:356:7] wire [31:0] io_debug_freelist = 32'h0; // @[rename-stage.scala:356:7] wire [31:0] io_debug_isprlist = 32'h0; // @[rename-stage.scala:356:7] wire [31:0] io_debug_busytable = 32'h0; // @[rename-stage.scala:356:7] wire _io_ren2_uops_0_ppred_busy_T_1 = 1'h1; // @[rename-stage.scala:390:92] wire _io_ren2_uops_1_ppred_busy_T_1 = 1'h1; // @[rename-stage.scala:390:92] wire _io_ren2_uops_2_ppred_busy_T_1 = 1'h1; // @[rename-stage.scala:390:92] wire [6:0] io_dec_uops_0_rob_idx = 7'h0; // @[rename-stage.scala:356:7] wire [6:0] io_dec_uops_0_pdst = 7'h0; // @[rename-stage.scala:356:7] wire [6:0] io_dec_uops_0_prs1 = 7'h0; // @[rename-stage.scala:356:7] wire [6:0] io_dec_uops_0_prs2 = 7'h0; // @[rename-stage.scala:356:7] wire [6:0] io_dec_uops_0_prs3 = 7'h0; // @[rename-stage.scala:356:7] wire [6:0] io_dec_uops_0_stale_pdst = 7'h0; // @[rename-stage.scala:356:7] wire [6:0] io_dec_uops_1_rob_idx = 7'h0; // @[rename-stage.scala:356:7] wire [6:0] io_dec_uops_1_pdst = 7'h0; // @[rename-stage.scala:356:7] wire [6:0] io_dec_uops_1_prs1 = 7'h0; // @[rename-stage.scala:356:7] wire [6:0] io_dec_uops_1_prs2 = 7'h0; // @[rename-stage.scala:356:7] wire [6:0] io_dec_uops_1_prs3 = 7'h0; // @[rename-stage.scala:356:7] wire [6:0] io_dec_uops_1_stale_pdst = 7'h0; // @[rename-stage.scala:356:7] wire [6:0] io_dec_uops_2_rob_idx = 7'h0; // @[rename-stage.scala:356:7] wire [6:0] io_dec_uops_2_pdst = 7'h0; // @[rename-stage.scala:356:7] wire [6:0] io_dec_uops_2_prs1 = 7'h0; // @[rename-stage.scala:356:7] wire [6:0] io_dec_uops_2_prs2 = 7'h0; // @[rename-stage.scala:356:7] wire [6:0] io_dec_uops_2_prs3 = 7'h0; // @[rename-stage.scala:356:7] wire [6:0] io_dec_uops_2_stale_pdst = 7'h0; // @[rename-stage.scala:356:7] wire [6:0] io_wakeups_0_bits_uop_uopc = 7'h0; // @[rename-stage.scala:356:7] wire [6:0] io_wakeups_0_bits_uop_rob_idx = 7'h0; // @[rename-stage.scala:356:7] wire [6:0] io_wakeups_0_bits_uop_pdst = 7'h0; // @[rename-stage.scala:356:7] wire [6:0] io_wakeups_0_bits_uop_prs1 = 7'h0; // @[rename-stage.scala:356:7] wire [6:0] io_wakeups_0_bits_uop_prs2 = 7'h0; // @[rename-stage.scala:356:7] wire [6:0] io_wakeups_0_bits_uop_prs3 = 7'h0; // @[rename-stage.scala:356:7] wire [6:0] io_wakeups_0_bits_uop_stale_pdst = 7'h0; // @[rename-stage.scala:356:7] wire [6:0] io_wakeups_0_bits_fflags_bits_uop_uopc = 7'h0; // 
@[rename-stage.scala:356:7] wire [6:0] io_wakeups_0_bits_fflags_bits_uop_rob_idx = 7'h0; // @[rename-stage.scala:356:7] wire [6:0] io_wakeups_0_bits_fflags_bits_uop_pdst = 7'h0; // @[rename-stage.scala:356:7] wire [6:0] io_wakeups_0_bits_fflags_bits_uop_prs1 = 7'h0; // @[rename-stage.scala:356:7] wire [6:0] io_wakeups_0_bits_fflags_bits_uop_prs2 = 7'h0; // @[rename-stage.scala:356:7] wire [6:0] io_wakeups_0_bits_fflags_bits_uop_prs3 = 7'h0; // @[rename-stage.scala:356:7] wire [6:0] io_wakeups_0_bits_fflags_bits_uop_stale_pdst = 7'h0; // @[rename-stage.scala:356:7] wire [6:0] ren1_uops_0_rob_idx = 7'h0; // @[rename-stage.scala:101:29] wire [6:0] ren1_uops_0_pdst = 7'h0; // @[rename-stage.scala:101:29] wire [6:0] ren1_uops_0_prs1 = 7'h0; // @[rename-stage.scala:101:29] wire [6:0] ren1_uops_0_prs2 = 7'h0; // @[rename-stage.scala:101:29] wire [6:0] ren1_uops_0_prs3 = 7'h0; // @[rename-stage.scala:101:29] wire [6:0] ren1_uops_0_stale_pdst = 7'h0; // @[rename-stage.scala:101:29] wire [6:0] ren1_uops_1_rob_idx = 7'h0; // @[rename-stage.scala:101:29] wire [6:0] ren1_uops_1_pdst = 7'h0; // @[rename-stage.scala:101:29] wire [6:0] ren1_uops_1_prs1 = 7'h0; // @[rename-stage.scala:101:29] wire [6:0] ren1_uops_1_prs2 = 7'h0; // @[rename-stage.scala:101:29] wire [6:0] ren1_uops_1_prs3 = 7'h0; // @[rename-stage.scala:101:29] wire [6:0] ren1_uops_1_stale_pdst = 7'h0; // @[rename-stage.scala:101:29] wire [6:0] ren1_uops_2_rob_idx = 7'h0; // @[rename-stage.scala:101:29] wire [6:0] ren1_uops_2_pdst = 7'h0; // @[rename-stage.scala:101:29] wire [6:0] ren1_uops_2_prs1 = 7'h0; // @[rename-stage.scala:101:29] wire [6:0] ren1_uops_2_prs2 = 7'h0; // @[rename-stage.scala:101:29] wire [6:0] ren1_uops_2_prs3 = 7'h0; // @[rename-stage.scala:101:29] wire [6:0] ren1_uops_2_stale_pdst = 7'h0; // @[rename-stage.scala:101:29] wire [11:0] io_dec_uops_0_csr_addr = 12'h0; // @[rename-stage.scala:356:7] wire [11:0] io_dec_uops_1_csr_addr = 12'h0; // @[rename-stage.scala:356:7] wire [11:0] io_dec_uops_2_csr_addr = 12'h0; // @[rename-stage.scala:356:7] wire [11:0] io_wakeups_0_bits_uop_csr_addr = 12'h0; // @[rename-stage.scala:356:7] wire [11:0] io_wakeups_0_bits_fflags_bits_uop_csr_addr = 12'h0; // @[rename-stage.scala:356:7] wire [11:0] ren1_uops_0_csr_addr = 12'h0; // @[rename-stage.scala:101:29] wire [11:0] ren1_uops_1_csr_addr = 12'h0; // @[rename-stage.scala:101:29] wire [11:0] ren1_uops_2_csr_addr = 12'h0; // @[rename-stage.scala:101:29] wire io_ren_stalls_0 = 1'h0; // @[rename-stage.scala:356:7] wire io_ren_stalls_1 = 1'h0; // @[rename-stage.scala:356:7] wire io_ren_stalls_2 = 1'h0; // @[rename-stage.scala:356:7] wire io_dec_uops_0_ctrl_fcn_dw = 1'h0; // @[rename-stage.scala:356:7] wire io_dec_uops_0_ctrl_is_load = 1'h0; // @[rename-stage.scala:356:7] wire io_dec_uops_0_ctrl_is_sta = 1'h0; // @[rename-stage.scala:356:7] wire io_dec_uops_0_ctrl_is_std = 1'h0; // @[rename-stage.scala:356:7] wire io_dec_uops_0_iw_p1_poisoned = 1'h0; // @[rename-stage.scala:356:7] wire io_dec_uops_0_iw_p2_poisoned = 1'h0; // @[rename-stage.scala:356:7] wire io_dec_uops_0_prs1_busy = 1'h0; // @[rename-stage.scala:356:7] wire io_dec_uops_0_prs2_busy = 1'h0; // @[rename-stage.scala:356:7] wire io_dec_uops_0_prs3_busy = 1'h0; // @[rename-stage.scala:356:7] wire io_dec_uops_0_ppred_busy = 1'h0; // @[rename-stage.scala:356:7] wire io_dec_uops_0_ldst_is_rs1 = 1'h0; // @[rename-stage.scala:356:7] wire io_dec_uops_0_xcpt_ma_if = 1'h0; // @[rename-stage.scala:356:7] wire io_dec_uops_1_ctrl_fcn_dw = 1'h0; // @[rename-stage.scala:356:7] wire 
io_dec_uops_1_ctrl_is_load = 1'h0; // @[rename-stage.scala:356:7] wire io_dec_uops_1_ctrl_is_sta = 1'h0; // @[rename-stage.scala:356:7] wire io_dec_uops_1_ctrl_is_std = 1'h0; // @[rename-stage.scala:356:7] wire io_dec_uops_1_iw_p1_poisoned = 1'h0; // @[rename-stage.scala:356:7] wire io_dec_uops_1_iw_p2_poisoned = 1'h0; // @[rename-stage.scala:356:7] wire io_dec_uops_1_prs1_busy = 1'h0; // @[rename-stage.scala:356:7] wire io_dec_uops_1_prs2_busy = 1'h0; // @[rename-stage.scala:356:7] wire io_dec_uops_1_prs3_busy = 1'h0; // @[rename-stage.scala:356:7] wire io_dec_uops_1_ppred_busy = 1'h0; // @[rename-stage.scala:356:7] wire io_dec_uops_1_ldst_is_rs1 = 1'h0; // @[rename-stage.scala:356:7] wire io_dec_uops_1_xcpt_ma_if = 1'h0; // @[rename-stage.scala:356:7] wire io_dec_uops_2_ctrl_fcn_dw = 1'h0; // @[rename-stage.scala:356:7] wire io_dec_uops_2_ctrl_is_load = 1'h0; // @[rename-stage.scala:356:7] wire io_dec_uops_2_ctrl_is_sta = 1'h0; // @[rename-stage.scala:356:7] wire io_dec_uops_2_ctrl_is_std = 1'h0; // @[rename-stage.scala:356:7] wire io_dec_uops_2_iw_p1_poisoned = 1'h0; // @[rename-stage.scala:356:7] wire io_dec_uops_2_iw_p2_poisoned = 1'h0; // @[rename-stage.scala:356:7] wire io_dec_uops_2_prs1_busy = 1'h0; // @[rename-stage.scala:356:7] wire io_dec_uops_2_prs2_busy = 1'h0; // @[rename-stage.scala:356:7] wire io_dec_uops_2_prs3_busy = 1'h0; // @[rename-stage.scala:356:7] wire io_dec_uops_2_ppred_busy = 1'h0; // @[rename-stage.scala:356:7] wire io_dec_uops_2_ldst_is_rs1 = 1'h0; // @[rename-stage.scala:356:7] wire io_dec_uops_2_xcpt_ma_if = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_valid = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_is_rvc = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_ctrl_fcn_dw = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_ctrl_is_load = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_ctrl_is_sta = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_ctrl_is_std = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_iw_p1_poisoned = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_iw_p2_poisoned = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_is_br = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_is_jalr = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_is_jal = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_is_sfb = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_edge_inst = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_taken = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_prs1_busy = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_prs2_busy = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_prs3_busy = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_ppred_busy = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_exception = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_bypassable = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_mem_signed = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_is_fence = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_is_fencei = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_is_amo = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_uses_ldq = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_uses_stq = 1'h0; // 
@[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_is_sys_pc2epc = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_is_unique = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_flush_on_commit = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_ldst_is_rs1 = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_ldst_val = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_frs3_en = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_fp_val = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_fp_single = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_xcpt_pf_if = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_xcpt_ae_if = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_xcpt_ma_if = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_bp_debug_if = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_uop_bp_xcpt_if = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_predicated = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_valid = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_is_rvc = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_ctrl_fcn_dw = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_ctrl_is_load = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_ctrl_is_sta = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_ctrl_is_std = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_iw_p1_poisoned = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_iw_p2_poisoned = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_is_br = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_is_jalr = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_is_jal = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_is_sfb = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_edge_inst = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_taken = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_prs1_busy = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_prs2_busy = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_prs3_busy = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_ppred_busy = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_exception = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_bypassable = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_mem_signed = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_is_fence = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_is_fencei = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_is_amo = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_uses_ldq = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_uses_stq = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_is_sys_pc2epc = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_is_unique = 1'h0; // @[rename-stage.scala:356:7] 
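// Reading note on the constant assignments in this region: io_wakeups_0_valid is hard-wired to
// 1'h0, so every field of that (unused) wakeup port's uop and fflags bundles is tied to zero, as
// are the io_debug_freelist/isprlist/busytable outputs and several uop fields this stage never
// drives. The _busy_table_WIRE_*, to_busy_* and unbusy_* nets that follow (rename-stage.scala:368-370)
// are the zero defaults of the 32-entry busy-table bookkeeping vectors. These look like the lowered
// defaults of constant-initialized wires in the Chisel source rather than functional logic.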
wire io_wakeups_0_bits_fflags_bits_uop_flush_on_commit = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_ldst_is_rs1 = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_ldst_val = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_frs3_en = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_fp_val = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_fp_single = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_xcpt_pf_if = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_xcpt_ae_if = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_xcpt_ma_if = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_bp_debug_if = 1'h0; // @[rename-stage.scala:356:7] wire io_wakeups_0_bits_fflags_bits_uop_bp_xcpt_if = 1'h0; // @[rename-stage.scala:356:7] wire ren1_uops_0_ctrl_fcn_dw = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_0_ctrl_is_load = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_0_ctrl_is_sta = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_0_ctrl_is_std = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_0_iw_p1_poisoned = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_0_iw_p2_poisoned = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_0_prs1_busy = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_0_prs2_busy = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_0_prs3_busy = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_0_ppred_busy = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_0_ldst_is_rs1 = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_0_xcpt_ma_if = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_1_ctrl_fcn_dw = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_1_ctrl_is_load = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_1_ctrl_is_sta = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_1_ctrl_is_std = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_1_iw_p1_poisoned = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_1_iw_p2_poisoned = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_1_prs1_busy = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_1_prs2_busy = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_1_prs3_busy = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_1_ppred_busy = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_1_ldst_is_rs1 = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_1_xcpt_ma_if = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_2_ctrl_fcn_dw = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_2_ctrl_is_load = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_2_ctrl_is_sta = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_2_ctrl_is_std = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_2_iw_p1_poisoned = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_2_iw_p2_poisoned = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_2_prs1_busy = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_2_prs2_busy = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_2_prs3_busy = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_2_ppred_busy = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_2_ldst_is_rs1 = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_2_xcpt_ma_if = 1'h0; // @[rename-stage.scala:101:29] wire ren2_alloc_reqs_0 = 1'h0; // @[rename-stage.scala:109:29] wire ren2_alloc_reqs_1 = 1'h0; // 
@[rename-stage.scala:109:29] wire ren2_alloc_reqs_2 = 1'h0; // @[rename-stage.scala:109:29] wire _busy_table_WIRE_0 = 1'h0; // @[rename-stage.scala:368:35] wire _busy_table_WIRE_1 = 1'h0; // @[rename-stage.scala:368:35] wire _busy_table_WIRE_2 = 1'h0; // @[rename-stage.scala:368:35] wire _busy_table_WIRE_3 = 1'h0; // @[rename-stage.scala:368:35] wire _busy_table_WIRE_4 = 1'h0; // @[rename-stage.scala:368:35] wire _busy_table_WIRE_5 = 1'h0; // @[rename-stage.scala:368:35] wire _busy_table_WIRE_6 = 1'h0; // @[rename-stage.scala:368:35] wire _busy_table_WIRE_7 = 1'h0; // @[rename-stage.scala:368:35] wire _busy_table_WIRE_8 = 1'h0; // @[rename-stage.scala:368:35] wire _busy_table_WIRE_9 = 1'h0; // @[rename-stage.scala:368:35] wire _busy_table_WIRE_10 = 1'h0; // @[rename-stage.scala:368:35] wire _busy_table_WIRE_11 = 1'h0; // @[rename-stage.scala:368:35] wire _busy_table_WIRE_12 = 1'h0; // @[rename-stage.scala:368:35] wire _busy_table_WIRE_13 = 1'h0; // @[rename-stage.scala:368:35] wire _busy_table_WIRE_14 = 1'h0; // @[rename-stage.scala:368:35] wire _busy_table_WIRE_15 = 1'h0; // @[rename-stage.scala:368:35] wire _busy_table_WIRE_16 = 1'h0; // @[rename-stage.scala:368:35] wire _busy_table_WIRE_17 = 1'h0; // @[rename-stage.scala:368:35] wire _busy_table_WIRE_18 = 1'h0; // @[rename-stage.scala:368:35] wire _busy_table_WIRE_19 = 1'h0; // @[rename-stage.scala:368:35] wire _busy_table_WIRE_20 = 1'h0; // @[rename-stage.scala:368:35] wire _busy_table_WIRE_21 = 1'h0; // @[rename-stage.scala:368:35] wire _busy_table_WIRE_22 = 1'h0; // @[rename-stage.scala:368:35] wire _busy_table_WIRE_23 = 1'h0; // @[rename-stage.scala:368:35] wire _busy_table_WIRE_24 = 1'h0; // @[rename-stage.scala:368:35] wire _busy_table_WIRE_25 = 1'h0; // @[rename-stage.scala:368:35] wire _busy_table_WIRE_26 = 1'h0; // @[rename-stage.scala:368:35] wire _busy_table_WIRE_27 = 1'h0; // @[rename-stage.scala:368:35] wire _busy_table_WIRE_28 = 1'h0; // @[rename-stage.scala:368:35] wire _busy_table_WIRE_29 = 1'h0; // @[rename-stage.scala:368:35] wire _busy_table_WIRE_30 = 1'h0; // @[rename-stage.scala:368:35] wire _busy_table_WIRE_31 = 1'h0; // @[rename-stage.scala:368:35] wire _to_busy_WIRE_0 = 1'h0; // @[rename-stage.scala:369:33] wire _to_busy_WIRE_1 = 1'h0; // @[rename-stage.scala:369:33] wire _to_busy_WIRE_2 = 1'h0; // @[rename-stage.scala:369:33] wire _to_busy_WIRE_3 = 1'h0; // @[rename-stage.scala:369:33] wire _to_busy_WIRE_4 = 1'h0; // @[rename-stage.scala:369:33] wire _to_busy_WIRE_5 = 1'h0; // @[rename-stage.scala:369:33] wire _to_busy_WIRE_6 = 1'h0; // @[rename-stage.scala:369:33] wire _to_busy_WIRE_7 = 1'h0; // @[rename-stage.scala:369:33] wire _to_busy_WIRE_8 = 1'h0; // @[rename-stage.scala:369:33] wire _to_busy_WIRE_9 = 1'h0; // @[rename-stage.scala:369:33] wire _to_busy_WIRE_10 = 1'h0; // @[rename-stage.scala:369:33] wire _to_busy_WIRE_11 = 1'h0; // @[rename-stage.scala:369:33] wire _to_busy_WIRE_12 = 1'h0; // @[rename-stage.scala:369:33] wire _to_busy_WIRE_13 = 1'h0; // @[rename-stage.scala:369:33] wire _to_busy_WIRE_14 = 1'h0; // @[rename-stage.scala:369:33] wire _to_busy_WIRE_15 = 1'h0; // @[rename-stage.scala:369:33] wire _to_busy_WIRE_16 = 1'h0; // @[rename-stage.scala:369:33] wire _to_busy_WIRE_17 = 1'h0; // @[rename-stage.scala:369:33] wire _to_busy_WIRE_18 = 1'h0; // @[rename-stage.scala:369:33] wire _to_busy_WIRE_19 = 1'h0; // @[rename-stage.scala:369:33] wire _to_busy_WIRE_20 = 1'h0; // @[rename-stage.scala:369:33] wire _to_busy_WIRE_21 = 1'h0; // @[rename-stage.scala:369:33] wire _to_busy_WIRE_22 = 1'h0; // 
@[rename-stage.scala:369:33] wire _to_busy_WIRE_23 = 1'h0; // @[rename-stage.scala:369:33] wire _to_busy_WIRE_24 = 1'h0; // @[rename-stage.scala:369:33] wire _to_busy_WIRE_25 = 1'h0; // @[rename-stage.scala:369:33] wire _to_busy_WIRE_26 = 1'h0; // @[rename-stage.scala:369:33] wire _to_busy_WIRE_27 = 1'h0; // @[rename-stage.scala:369:33] wire _to_busy_WIRE_28 = 1'h0; // @[rename-stage.scala:369:33] wire _to_busy_WIRE_29 = 1'h0; // @[rename-stage.scala:369:33] wire _to_busy_WIRE_30 = 1'h0; // @[rename-stage.scala:369:33] wire _to_busy_WIRE_31 = 1'h0; // @[rename-stage.scala:369:33] wire to_busy_0 = 1'h0; // @[rename-stage.scala:369:25] wire to_busy_1 = 1'h0; // @[rename-stage.scala:369:25] wire to_busy_2 = 1'h0; // @[rename-stage.scala:369:25] wire to_busy_3 = 1'h0; // @[rename-stage.scala:369:25] wire to_busy_4 = 1'h0; // @[rename-stage.scala:369:25] wire to_busy_5 = 1'h0; // @[rename-stage.scala:369:25] wire to_busy_6 = 1'h0; // @[rename-stage.scala:369:25] wire to_busy_7 = 1'h0; // @[rename-stage.scala:369:25] wire to_busy_8 = 1'h0; // @[rename-stage.scala:369:25] wire to_busy_9 = 1'h0; // @[rename-stage.scala:369:25] wire to_busy_10 = 1'h0; // @[rename-stage.scala:369:25] wire to_busy_11 = 1'h0; // @[rename-stage.scala:369:25] wire to_busy_12 = 1'h0; // @[rename-stage.scala:369:25] wire to_busy_13 = 1'h0; // @[rename-stage.scala:369:25] wire to_busy_14 = 1'h0; // @[rename-stage.scala:369:25] wire to_busy_15 = 1'h0; // @[rename-stage.scala:369:25] wire to_busy_16 = 1'h0; // @[rename-stage.scala:369:25] wire to_busy_17 = 1'h0; // @[rename-stage.scala:369:25] wire to_busy_18 = 1'h0; // @[rename-stage.scala:369:25] wire to_busy_19 = 1'h0; // @[rename-stage.scala:369:25] wire to_busy_20 = 1'h0; // @[rename-stage.scala:369:25] wire to_busy_21 = 1'h0; // @[rename-stage.scala:369:25] wire to_busy_22 = 1'h0; // @[rename-stage.scala:369:25] wire to_busy_23 = 1'h0; // @[rename-stage.scala:369:25] wire to_busy_24 = 1'h0; // @[rename-stage.scala:369:25] wire to_busy_25 = 1'h0; // @[rename-stage.scala:369:25] wire to_busy_26 = 1'h0; // @[rename-stage.scala:369:25] wire to_busy_27 = 1'h0; // @[rename-stage.scala:369:25] wire to_busy_28 = 1'h0; // @[rename-stage.scala:369:25] wire to_busy_29 = 1'h0; // @[rename-stage.scala:369:25] wire to_busy_30 = 1'h0; // @[rename-stage.scala:369:25] wire to_busy_31 = 1'h0; // @[rename-stage.scala:369:25] wire _unbusy_WIRE_0 = 1'h0; // @[rename-stage.scala:370:32] wire _unbusy_WIRE_1 = 1'h0; // @[rename-stage.scala:370:32] wire _unbusy_WIRE_2 = 1'h0; // @[rename-stage.scala:370:32] wire _unbusy_WIRE_3 = 1'h0; // @[rename-stage.scala:370:32] wire _unbusy_WIRE_4 = 1'h0; // @[rename-stage.scala:370:32] wire _unbusy_WIRE_5 = 1'h0; // @[rename-stage.scala:370:32] wire _unbusy_WIRE_6 = 1'h0; // @[rename-stage.scala:370:32] wire _unbusy_WIRE_7 = 1'h0; // @[rename-stage.scala:370:32] wire _unbusy_WIRE_8 = 1'h0; // @[rename-stage.scala:370:32] wire _unbusy_WIRE_9 = 1'h0; // @[rename-stage.scala:370:32] wire _unbusy_WIRE_10 = 1'h0; // @[rename-stage.scala:370:32] wire _unbusy_WIRE_11 = 1'h0; // @[rename-stage.scala:370:32] wire _unbusy_WIRE_12 = 1'h0; // @[rename-stage.scala:370:32] wire _unbusy_WIRE_13 = 1'h0; // @[rename-stage.scala:370:32] wire _unbusy_WIRE_14 = 1'h0; // @[rename-stage.scala:370:32] wire _unbusy_WIRE_15 = 1'h0; // @[rename-stage.scala:370:32] wire _unbusy_WIRE_16 = 1'h0; // @[rename-stage.scala:370:32] wire _unbusy_WIRE_17 = 1'h0; // @[rename-stage.scala:370:32] wire _unbusy_WIRE_18 = 1'h0; // @[rename-stage.scala:370:32] wire _unbusy_WIRE_19 = 1'h0; // 
@[rename-stage.scala:370:32] wire _unbusy_WIRE_20 = 1'h0; // @[rename-stage.scala:370:32] wire _unbusy_WIRE_21 = 1'h0; // @[rename-stage.scala:370:32] wire _unbusy_WIRE_22 = 1'h0; // @[rename-stage.scala:370:32] wire _unbusy_WIRE_23 = 1'h0; // @[rename-stage.scala:370:32] wire _unbusy_WIRE_24 = 1'h0; // @[rename-stage.scala:370:32] wire _unbusy_WIRE_25 = 1'h0; // @[rename-stage.scala:370:32] wire _unbusy_WIRE_26 = 1'h0; // @[rename-stage.scala:370:32] wire _unbusy_WIRE_27 = 1'h0; // @[rename-stage.scala:370:32] wire _unbusy_WIRE_28 = 1'h0; // @[rename-stage.scala:370:32] wire _unbusy_WIRE_29 = 1'h0; // @[rename-stage.scala:370:32] wire _unbusy_WIRE_30 = 1'h0; // @[rename-stage.scala:370:32] wire _unbusy_WIRE_31 = 1'h0; // @[rename-stage.scala:370:32] wire unbusy_0 = 1'h0; // @[rename-stage.scala:370:24] wire unbusy_1 = 1'h0; // @[rename-stage.scala:370:24] wire unbusy_2 = 1'h0; // @[rename-stage.scala:370:24] wire unbusy_3 = 1'h0; // @[rename-stage.scala:370:24] wire unbusy_4 = 1'h0; // @[rename-stage.scala:370:24] wire unbusy_5 = 1'h0; // @[rename-stage.scala:370:24] wire unbusy_6 = 1'h0; // @[rename-stage.scala:370:24] wire unbusy_7 = 1'h0; // @[rename-stage.scala:370:24] wire unbusy_8 = 1'h0; // @[rename-stage.scala:370:24] wire unbusy_9 = 1'h0; // @[rename-stage.scala:370:24] wire unbusy_10 = 1'h0; // @[rename-stage.scala:370:24] wire unbusy_11 = 1'h0; // @[rename-stage.scala:370:24] wire unbusy_12 = 1'h0; // @[rename-stage.scala:370:24] wire unbusy_13 = 1'h0; // @[rename-stage.scala:370:24] wire unbusy_14 = 1'h0; // @[rename-stage.scala:370:24] wire unbusy_15 = 1'h0; // @[rename-stage.scala:370:24] wire unbusy_16 = 1'h0; // @[rename-stage.scala:370:24] wire unbusy_17 = 1'h0; // @[rename-stage.scala:370:24] wire unbusy_18 = 1'h0; // @[rename-stage.scala:370:24] wire unbusy_19 = 1'h0; // @[rename-stage.scala:370:24] wire unbusy_20 = 1'h0; // @[rename-stage.scala:370:24] wire unbusy_21 = 1'h0; // @[rename-stage.scala:370:24] wire unbusy_22 = 1'h0; // @[rename-stage.scala:370:24] wire unbusy_23 = 1'h0; // @[rename-stage.scala:370:24] wire unbusy_24 = 1'h0; // @[rename-stage.scala:370:24] wire unbusy_25 = 1'h0; // @[rename-stage.scala:370:24] wire unbusy_26 = 1'h0; // @[rename-stage.scala:370:24] wire unbusy_27 = 1'h0; // @[rename-stage.scala:370:24] wire unbusy_28 = 1'h0; // @[rename-stage.scala:370:24] wire unbusy_29 = 1'h0; // @[rename-stage.scala:370:24] wire unbusy_30 = 1'h0; // @[rename-stage.scala:370:24] wire unbusy_31 = 1'h0; // @[rename-stage.scala:370:24] wire _is_sfb_br_T_1 = 1'h0; // @[micro-op.scala:109:42] wire is_sfb_br = 1'h0; // @[rename-stage.scala:378:44] wire _is_sfb_shadow_T_2 = 1'h0; // @[micro-op.scala:110:43] wire is_sfb_shadow = 1'h0; // @[rename-stage.scala:379:52] wire _is_sfb_br_T_3 = 1'h0; // @[micro-op.scala:109:42] wire is_sfb_br_1 = 1'h0; // @[rename-stage.scala:378:44] wire _is_sfb_shadow_T_5 = 1'h0; // @[micro-op.scala:110:43] wire is_sfb_shadow_1 = 1'h0; // @[rename-stage.scala:379:52] wire _is_sfb_br_T_5 = 1'h0; // @[micro-op.scala:109:42] wire is_sfb_br_2 = 1'h0; // @[rename-stage.scala:378:44] wire _is_sfb_shadow_T_8 = 1'h0; // @[micro-op.scala:110:43] wire is_sfb_shadow_2 = 1'h0; // @[rename-stage.scala:379:52] wire [4:0] io_dec_uops_0_ctrl_op_fcn = 5'h0; // @[rename-stage.scala:356:7] wire [4:0] io_dec_uops_0_ldq_idx = 5'h0; // @[rename-stage.scala:356:7] wire [4:0] io_dec_uops_0_stq_idx = 5'h0; // @[rename-stage.scala:356:7] wire [4:0] io_dec_uops_0_ppred = 5'h0; // @[rename-stage.scala:356:7] wire [4:0] io_dec_uops_1_ctrl_op_fcn = 5'h0; // 
@[rename-stage.scala:356:7] wire [4:0] io_dec_uops_1_ldq_idx = 5'h0; // @[rename-stage.scala:356:7] wire [4:0] io_dec_uops_1_stq_idx = 5'h0; // @[rename-stage.scala:356:7] wire [4:0] io_dec_uops_1_ppred = 5'h0; // @[rename-stage.scala:356:7] wire [4:0] io_dec_uops_2_ctrl_op_fcn = 5'h0; // @[rename-stage.scala:356:7] wire [4:0] io_dec_uops_2_ldq_idx = 5'h0; // @[rename-stage.scala:356:7] wire [4:0] io_dec_uops_2_stq_idx = 5'h0; // @[rename-stage.scala:356:7] wire [4:0] io_dec_uops_2_ppred = 5'h0; // @[rename-stage.scala:356:7] wire [4:0] io_wakeups_0_bits_uop_ctrl_op_fcn = 5'h0; // @[rename-stage.scala:356:7] wire [4:0] io_wakeups_0_bits_uop_ftq_idx = 5'h0; // @[rename-stage.scala:356:7] wire [4:0] io_wakeups_0_bits_uop_ldq_idx = 5'h0; // @[rename-stage.scala:356:7] wire [4:0] io_wakeups_0_bits_uop_stq_idx = 5'h0; // @[rename-stage.scala:356:7] wire [4:0] io_wakeups_0_bits_uop_ppred = 5'h0; // @[rename-stage.scala:356:7] wire [4:0] io_wakeups_0_bits_uop_mem_cmd = 5'h0; // @[rename-stage.scala:356:7] wire [4:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_op_fcn = 5'h0; // @[rename-stage.scala:356:7] wire [4:0] io_wakeups_0_bits_fflags_bits_uop_ftq_idx = 5'h0; // @[rename-stage.scala:356:7] wire [4:0] io_wakeups_0_bits_fflags_bits_uop_ldq_idx = 5'h0; // @[rename-stage.scala:356:7] wire [4:0] io_wakeups_0_bits_fflags_bits_uop_stq_idx = 5'h0; // @[rename-stage.scala:356:7] wire [4:0] io_wakeups_0_bits_fflags_bits_uop_ppred = 5'h0; // @[rename-stage.scala:356:7] wire [4:0] io_wakeups_0_bits_fflags_bits_uop_mem_cmd = 5'h0; // @[rename-stage.scala:356:7] wire [4:0] io_wakeups_0_bits_fflags_bits_flags = 5'h0; // @[rename-stage.scala:356:7] wire [4:0] ren1_uops_0_ctrl_op_fcn = 5'h0; // @[rename-stage.scala:101:29] wire [4:0] ren1_uops_0_ldq_idx = 5'h0; // @[rename-stage.scala:101:29] wire [4:0] ren1_uops_0_stq_idx = 5'h0; // @[rename-stage.scala:101:29] wire [4:0] ren1_uops_0_ppred = 5'h0; // @[rename-stage.scala:101:29] wire [4:0] ren1_uops_1_ctrl_op_fcn = 5'h0; // @[rename-stage.scala:101:29] wire [4:0] ren1_uops_1_ldq_idx = 5'h0; // @[rename-stage.scala:101:29] wire [4:0] ren1_uops_1_stq_idx = 5'h0; // @[rename-stage.scala:101:29] wire [4:0] ren1_uops_1_ppred = 5'h0; // @[rename-stage.scala:101:29] wire [4:0] ren1_uops_2_ctrl_op_fcn = 5'h0; // @[rename-stage.scala:101:29] wire [4:0] ren1_uops_2_ldq_idx = 5'h0; // @[rename-stage.scala:101:29] wire [4:0] ren1_uops_2_stq_idx = 5'h0; // @[rename-stage.scala:101:29] wire [4:0] ren1_uops_2_ppred = 5'h0; // @[rename-stage.scala:101:29] wire [2:0] io_dec_uops_0_ctrl_op2_sel = 3'h0; // @[rename-stage.scala:356:7] wire [2:0] io_dec_uops_0_ctrl_imm_sel = 3'h0; // @[rename-stage.scala:356:7] wire [2:0] io_dec_uops_0_ctrl_csr_cmd = 3'h0; // @[rename-stage.scala:356:7] wire [2:0] io_dec_uops_1_ctrl_op2_sel = 3'h0; // @[rename-stage.scala:356:7] wire [2:0] io_dec_uops_1_ctrl_imm_sel = 3'h0; // @[rename-stage.scala:356:7] wire [2:0] io_dec_uops_1_ctrl_csr_cmd = 3'h0; // @[rename-stage.scala:356:7] wire [2:0] io_dec_uops_2_ctrl_op2_sel = 3'h0; // @[rename-stage.scala:356:7] wire [2:0] io_dec_uops_2_ctrl_imm_sel = 3'h0; // @[rename-stage.scala:356:7] wire [2:0] io_dec_uops_2_ctrl_csr_cmd = 3'h0; // @[rename-stage.scala:356:7] wire [2:0] io_wakeups_0_bits_uop_iq_type = 3'h0; // @[rename-stage.scala:356:7] wire [2:0] io_wakeups_0_bits_uop_ctrl_op2_sel = 3'h0; // @[rename-stage.scala:356:7] wire [2:0] io_wakeups_0_bits_uop_ctrl_imm_sel = 3'h0; // @[rename-stage.scala:356:7] wire [2:0] io_wakeups_0_bits_uop_ctrl_csr_cmd = 3'h0; // @[rename-stage.scala:356:7] wire [2:0] 
io_wakeups_0_bits_fflags_bits_uop_iq_type = 3'h0; // @[rename-stage.scala:356:7] wire [2:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_op2_sel = 3'h0; // @[rename-stage.scala:356:7] wire [2:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_imm_sel = 3'h0; // @[rename-stage.scala:356:7] wire [2:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_csr_cmd = 3'h0; // @[rename-stage.scala:356:7] wire [2:0] ren1_uops_0_ctrl_op2_sel = 3'h0; // @[rename-stage.scala:101:29] wire [2:0] ren1_uops_0_ctrl_imm_sel = 3'h0; // @[rename-stage.scala:101:29] wire [2:0] ren1_uops_0_ctrl_csr_cmd = 3'h0; // @[rename-stage.scala:101:29] wire [2:0] ren1_uops_1_ctrl_op2_sel = 3'h0; // @[rename-stage.scala:101:29] wire [2:0] ren1_uops_1_ctrl_imm_sel = 3'h0; // @[rename-stage.scala:101:29] wire [2:0] ren1_uops_1_ctrl_csr_cmd = 3'h0; // @[rename-stage.scala:101:29] wire [2:0] ren1_uops_2_ctrl_op2_sel = 3'h0; // @[rename-stage.scala:101:29] wire [2:0] ren1_uops_2_ctrl_imm_sel = 3'h0; // @[rename-stage.scala:101:29] wire [2:0] ren1_uops_2_ctrl_csr_cmd = 3'h0; // @[rename-stage.scala:101:29] wire [1:0] io_dec_uops_0_ctrl_op1_sel = 2'h0; // @[rename-stage.scala:356:7] wire [1:0] io_dec_uops_0_iw_state = 2'h0; // @[rename-stage.scala:356:7] wire [1:0] io_dec_uops_0_rxq_idx = 2'h0; // @[rename-stage.scala:356:7] wire [1:0] io_dec_uops_0_debug_tsrc = 2'h0; // @[rename-stage.scala:356:7] wire [1:0] io_dec_uops_1_ctrl_op1_sel = 2'h0; // @[rename-stage.scala:356:7] wire [1:0] io_dec_uops_1_iw_state = 2'h0; // @[rename-stage.scala:356:7] wire [1:0] io_dec_uops_1_rxq_idx = 2'h0; // @[rename-stage.scala:356:7] wire [1:0] io_dec_uops_1_debug_tsrc = 2'h0; // @[rename-stage.scala:356:7] wire [1:0] io_dec_uops_2_ctrl_op1_sel = 2'h0; // @[rename-stage.scala:356:7] wire [1:0] io_dec_uops_2_iw_state = 2'h0; // @[rename-stage.scala:356:7] wire [1:0] io_dec_uops_2_rxq_idx = 2'h0; // @[rename-stage.scala:356:7] wire [1:0] io_dec_uops_2_debug_tsrc = 2'h0; // @[rename-stage.scala:356:7] wire [1:0] io_wakeups_0_bits_uop_ctrl_op1_sel = 2'h0; // @[rename-stage.scala:356:7] wire [1:0] io_wakeups_0_bits_uop_iw_state = 2'h0; // @[rename-stage.scala:356:7] wire [1:0] io_wakeups_0_bits_uop_rxq_idx = 2'h0; // @[rename-stage.scala:356:7] wire [1:0] io_wakeups_0_bits_uop_mem_size = 2'h0; // @[rename-stage.scala:356:7] wire [1:0] io_wakeups_0_bits_uop_dst_rtype = 2'h0; // @[rename-stage.scala:356:7] wire [1:0] io_wakeups_0_bits_uop_lrs1_rtype = 2'h0; // @[rename-stage.scala:356:7] wire [1:0] io_wakeups_0_bits_uop_lrs2_rtype = 2'h0; // @[rename-stage.scala:356:7] wire [1:0] io_wakeups_0_bits_uop_debug_fsrc = 2'h0; // @[rename-stage.scala:356:7] wire [1:0] io_wakeups_0_bits_uop_debug_tsrc = 2'h0; // @[rename-stage.scala:356:7] wire [1:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_op1_sel = 2'h0; // @[rename-stage.scala:356:7] wire [1:0] io_wakeups_0_bits_fflags_bits_uop_iw_state = 2'h0; // @[rename-stage.scala:356:7] wire [1:0] io_wakeups_0_bits_fflags_bits_uop_rxq_idx = 2'h0; // @[rename-stage.scala:356:7] wire [1:0] io_wakeups_0_bits_fflags_bits_uop_mem_size = 2'h0; // @[rename-stage.scala:356:7] wire [1:0] io_wakeups_0_bits_fflags_bits_uop_dst_rtype = 2'h0; // @[rename-stage.scala:356:7] wire [1:0] io_wakeups_0_bits_fflags_bits_uop_lrs1_rtype = 2'h0; // @[rename-stage.scala:356:7] wire [1:0] io_wakeups_0_bits_fflags_bits_uop_lrs2_rtype = 2'h0; // @[rename-stage.scala:356:7] wire [1:0] io_wakeups_0_bits_fflags_bits_uop_debug_fsrc = 2'h0; // @[rename-stage.scala:356:7] wire [1:0] io_wakeups_0_bits_fflags_bits_uop_debug_tsrc = 2'h0; // @[rename-stage.scala:356:7] wire [1:0] 
ren1_uops_0_ctrl_op1_sel = 2'h0; // @[rename-stage.scala:101:29] wire [1:0] ren1_uops_0_iw_state = 2'h0; // @[rename-stage.scala:101:29] wire [1:0] ren1_uops_0_rxq_idx = 2'h0; // @[rename-stage.scala:101:29] wire [1:0] ren1_uops_0_debug_tsrc = 2'h0; // @[rename-stage.scala:101:29] wire [1:0] ren1_uops_1_ctrl_op1_sel = 2'h0; // @[rename-stage.scala:101:29] wire [1:0] ren1_uops_1_iw_state = 2'h0; // @[rename-stage.scala:101:29] wire [1:0] ren1_uops_1_rxq_idx = 2'h0; // @[rename-stage.scala:101:29] wire [1:0] ren1_uops_1_debug_tsrc = 2'h0; // @[rename-stage.scala:101:29] wire [1:0] ren1_uops_2_ctrl_op1_sel = 2'h0; // @[rename-stage.scala:101:29] wire [1:0] ren1_uops_2_iw_state = 2'h0; // @[rename-stage.scala:101:29] wire [1:0] ren1_uops_2_rxq_idx = 2'h0; // @[rename-stage.scala:101:29] wire [1:0] ren1_uops_2_debug_tsrc = 2'h0; // @[rename-stage.scala:101:29] wire [1:0] lo_lo_lo_lo_1 = 2'h0; // @[rename-stage.scala:402:47] wire [1:0] lo_lo_lo_hi_1 = 2'h0; // @[rename-stage.scala:402:47] wire [1:0] lo_lo_hi_lo_1 = 2'h0; // @[rename-stage.scala:402:47] wire [1:0] lo_lo_hi_hi_1 = 2'h0; // @[rename-stage.scala:402:47] wire [1:0] lo_hi_lo_lo_1 = 2'h0; // @[rename-stage.scala:402:47] wire [1:0] lo_hi_lo_hi_1 = 2'h0; // @[rename-stage.scala:402:47] wire [1:0] lo_hi_hi_lo_1 = 2'h0; // @[rename-stage.scala:402:47] wire [1:0] lo_hi_hi_hi_1 = 2'h0; // @[rename-stage.scala:402:47] wire [1:0] hi_lo_lo_lo_1 = 2'h0; // @[rename-stage.scala:402:47] wire [1:0] hi_lo_lo_hi_1 = 2'h0; // @[rename-stage.scala:402:47] wire [1:0] hi_lo_hi_lo_1 = 2'h0; // @[rename-stage.scala:402:47] wire [1:0] hi_lo_hi_hi_1 = 2'h0; // @[rename-stage.scala:402:47] wire [1:0] hi_hi_lo_lo_1 = 2'h0; // @[rename-stage.scala:402:47] wire [1:0] hi_hi_lo_hi_1 = 2'h0; // @[rename-stage.scala:402:47] wire [1:0] hi_hi_hi_lo_1 = 2'h0; // @[rename-stage.scala:402:47] wire [1:0] hi_hi_hi_hi_1 = 2'h0; // @[rename-stage.scala:402:47] wire [1:0] lo_lo_lo_lo_2 = 2'h0; // @[rename-stage.scala:402:65] wire [1:0] lo_lo_lo_hi_2 = 2'h0; // @[rename-stage.scala:402:65] wire [1:0] lo_lo_hi_lo_2 = 2'h0; // @[rename-stage.scala:402:65] wire [1:0] lo_lo_hi_hi_2 = 2'h0; // @[rename-stage.scala:402:65] wire [1:0] lo_hi_lo_lo_2 = 2'h0; // @[rename-stage.scala:402:65] wire [1:0] lo_hi_lo_hi_2 = 2'h0; // @[rename-stage.scala:402:65] wire [1:0] lo_hi_hi_lo_2 = 2'h0; // @[rename-stage.scala:402:65] wire [1:0] lo_hi_hi_hi_2 = 2'h0; // @[rename-stage.scala:402:65] wire [1:0] hi_lo_lo_lo_2 = 2'h0; // @[rename-stage.scala:402:65] wire [1:0] hi_lo_lo_hi_2 = 2'h0; // @[rename-stage.scala:402:65] wire [1:0] hi_lo_hi_lo_2 = 2'h0; // @[rename-stage.scala:402:65] wire [1:0] hi_lo_hi_hi_2 = 2'h0; // @[rename-stage.scala:402:65] wire [1:0] hi_hi_lo_lo_2 = 2'h0; // @[rename-stage.scala:402:65] wire [1:0] hi_hi_lo_hi_2 = 2'h0; // @[rename-stage.scala:402:65] wire [1:0] hi_hi_hi_lo_2 = 2'h0; // @[rename-stage.scala:402:65] wire [1:0] hi_hi_hi_hi_2 = 2'h0; // @[rename-stage.scala:402:65] wire [3:0] io_dec_uops_0_ctrl_br_type = 4'h0; // @[rename-stage.scala:356:7] wire [3:0] io_dec_uops_1_ctrl_br_type = 4'h0; // @[rename-stage.scala:356:7] wire [3:0] io_dec_uops_2_ctrl_br_type = 4'h0; // @[rename-stage.scala:356:7] wire [3:0] io_wakeups_0_bits_uop_ctrl_br_type = 4'h0; // @[rename-stage.scala:356:7] wire [3:0] io_wakeups_0_bits_uop_br_tag = 4'h0; // @[rename-stage.scala:356:7] wire [3:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_br_type = 4'h0; // @[rename-stage.scala:356:7] wire [3:0] io_wakeups_0_bits_fflags_bits_uop_br_tag = 4'h0; // @[rename-stage.scala:356:7] wire [3:0] 
ren1_uops_0_ctrl_br_type = 4'h0; // @[rename-stage.scala:101:29] wire [3:0] ren1_uops_1_ctrl_br_type = 4'h0; // @[rename-stage.scala:101:29] wire [3:0] ren1_uops_2_ctrl_br_type = 4'h0; // @[rename-stage.scala:101:29] wire [3:0] lo_lo_lo_1 = 4'h0; // @[rename-stage.scala:402:47] wire [3:0] lo_lo_hi_1 = 4'h0; // @[rename-stage.scala:402:47] wire [3:0] lo_hi_lo_1 = 4'h0; // @[rename-stage.scala:402:47] wire [3:0] lo_hi_hi_1 = 4'h0; // @[rename-stage.scala:402:47] wire [3:0] hi_lo_lo_1 = 4'h0; // @[rename-stage.scala:402:47] wire [3:0] hi_lo_hi_1 = 4'h0; // @[rename-stage.scala:402:47] wire [3:0] hi_hi_lo_1 = 4'h0; // @[rename-stage.scala:402:47] wire [3:0] hi_hi_hi_1 = 4'h0; // @[rename-stage.scala:402:47] wire [3:0] lo_lo_lo_2 = 4'h0; // @[rename-stage.scala:402:65] wire [3:0] lo_lo_hi_2 = 4'h0; // @[rename-stage.scala:402:65] wire [3:0] lo_hi_lo_2 = 4'h0; // @[rename-stage.scala:402:65] wire [3:0] lo_hi_hi_2 = 4'h0; // @[rename-stage.scala:402:65] wire [3:0] hi_lo_lo_2 = 4'h0; // @[rename-stage.scala:402:65] wire [3:0] hi_lo_hi_2 = 4'h0; // @[rename-stage.scala:402:65] wire [3:0] hi_hi_lo_2 = 4'h0; // @[rename-stage.scala:402:65] wire [3:0] hi_hi_hi_2 = 4'h0; // @[rename-stage.scala:402:65] wire ren1_fire_0 = io_dec_fire_0_0; // @[rename-stage.scala:100:29, :356:7] wire ren1_fire_1 = io_dec_fire_1_0; // @[rename-stage.scala:100:29, :356:7] wire ren1_fire_2 = io_dec_fire_2_0; // @[rename-stage.scala:100:29, :356:7] wire [6:0] ren1_uops_0_uopc = io_dec_uops_0_uopc_0; // @[rename-stage.scala:101:29, :356:7] wire [31:0] ren1_uops_0_inst = io_dec_uops_0_inst_0; // @[rename-stage.scala:101:29, :356:7] wire [31:0] ren1_uops_0_debug_inst = io_dec_uops_0_debug_inst_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_0_is_rvc = io_dec_uops_0_is_rvc_0; // @[rename-stage.scala:101:29, :356:7] wire [39:0] ren1_uops_0_debug_pc = io_dec_uops_0_debug_pc_0; // @[rename-stage.scala:101:29, :356:7] wire [2:0] ren1_uops_0_iq_type = io_dec_uops_0_iq_type_0; // @[rename-stage.scala:101:29, :356:7] wire [9:0] ren1_uops_0_fu_code = io_dec_uops_0_fu_code_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_0_is_br = io_dec_uops_0_is_br_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_0_is_jalr = io_dec_uops_0_is_jalr_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_0_is_jal = io_dec_uops_0_is_jal_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_0_is_sfb = io_dec_uops_0_is_sfb_0; // @[rename-stage.scala:101:29, :356:7] wire [15:0] ren1_uops_0_br_mask = io_dec_uops_0_br_mask_0; // @[rename-stage.scala:101:29, :356:7] wire [3:0] ren1_uops_0_br_tag = io_dec_uops_0_br_tag_0; // @[rename-stage.scala:101:29, :356:7] wire [4:0] ren1_uops_0_ftq_idx = io_dec_uops_0_ftq_idx_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_0_edge_inst = io_dec_uops_0_edge_inst_0; // @[rename-stage.scala:101:29, :356:7] wire [5:0] ren1_uops_0_pc_lob = io_dec_uops_0_pc_lob_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_0_taken = io_dec_uops_0_taken_0; // @[rename-stage.scala:101:29, :356:7] wire [19:0] ren1_uops_0_imm_packed = io_dec_uops_0_imm_packed_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_0_exception = io_dec_uops_0_exception_0; // @[rename-stage.scala:101:29, :356:7] wire [63:0] ren1_uops_0_exc_cause = io_dec_uops_0_exc_cause_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_0_bypassable = io_dec_uops_0_bypassable_0; // @[rename-stage.scala:101:29, :356:7] wire [4:0] ren1_uops_0_mem_cmd = io_dec_uops_0_mem_cmd_0; // @[rename-stage.scala:101:29, :356:7] wire 
[1:0] ren1_uops_0_mem_size = io_dec_uops_0_mem_size_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_0_mem_signed = io_dec_uops_0_mem_signed_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_0_is_fence = io_dec_uops_0_is_fence_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_0_is_fencei = io_dec_uops_0_is_fencei_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_0_is_amo = io_dec_uops_0_is_amo_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_0_uses_ldq = io_dec_uops_0_uses_ldq_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_0_uses_stq = io_dec_uops_0_uses_stq_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_0_is_sys_pc2epc = io_dec_uops_0_is_sys_pc2epc_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_0_is_unique = io_dec_uops_0_is_unique_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_0_flush_on_commit = io_dec_uops_0_flush_on_commit_0; // @[rename-stage.scala:101:29, :356:7] wire [5:0] ren1_uops_0_ldst = io_dec_uops_0_ldst_0; // @[rename-stage.scala:101:29, :356:7] wire [5:0] ren1_uops_0_lrs1 = io_dec_uops_0_lrs1_0; // @[rename-stage.scala:101:29, :356:7] wire [5:0] ren1_uops_0_lrs2 = io_dec_uops_0_lrs2_0; // @[rename-stage.scala:101:29, :356:7] wire [5:0] ren1_uops_0_lrs3 = io_dec_uops_0_lrs3_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_0_ldst_val = io_dec_uops_0_ldst_val_0; // @[rename-stage.scala:101:29, :356:7] wire [1:0] ren1_uops_0_dst_rtype = io_dec_uops_0_dst_rtype_0; // @[rename-stage.scala:101:29, :356:7] wire [1:0] ren1_uops_0_lrs1_rtype = io_dec_uops_0_lrs1_rtype_0; // @[rename-stage.scala:101:29, :356:7] wire [1:0] ren1_uops_0_lrs2_rtype = io_dec_uops_0_lrs2_rtype_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_0_frs3_en = io_dec_uops_0_frs3_en_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_0_fp_val = io_dec_uops_0_fp_val_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_0_fp_single = io_dec_uops_0_fp_single_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_0_xcpt_pf_if = io_dec_uops_0_xcpt_pf_if_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_0_xcpt_ae_if = io_dec_uops_0_xcpt_ae_if_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_0_bp_debug_if = io_dec_uops_0_bp_debug_if_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_0_bp_xcpt_if = io_dec_uops_0_bp_xcpt_if_0; // @[rename-stage.scala:101:29, :356:7] wire [1:0] ren1_uops_0_debug_fsrc = io_dec_uops_0_debug_fsrc_0; // @[rename-stage.scala:101:29, :356:7] wire [6:0] ren1_uops_1_uopc = io_dec_uops_1_uopc_0; // @[rename-stage.scala:101:29, :356:7] wire [31:0] ren1_uops_1_inst = io_dec_uops_1_inst_0; // @[rename-stage.scala:101:29, :356:7] wire [31:0] ren1_uops_1_debug_inst = io_dec_uops_1_debug_inst_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_1_is_rvc = io_dec_uops_1_is_rvc_0; // @[rename-stage.scala:101:29, :356:7] wire [39:0] ren1_uops_1_debug_pc = io_dec_uops_1_debug_pc_0; // @[rename-stage.scala:101:29, :356:7] wire [2:0] ren1_uops_1_iq_type = io_dec_uops_1_iq_type_0; // @[rename-stage.scala:101:29, :356:7] wire [9:0] ren1_uops_1_fu_code = io_dec_uops_1_fu_code_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_1_is_br = io_dec_uops_1_is_br_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_1_is_jalr = io_dec_uops_1_is_jalr_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_1_is_jal = io_dec_uops_1_is_jal_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_1_is_sfb = io_dec_uops_1_is_sfb_0; // @[rename-stage.scala:101:29, :356:7] 
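// Note: the ren1_uops_0_* wires above are one-to-one aliases of the
// io_dec_uops_0_* input fields, while the ctrl_*, iw_state, rxq_idx, and
// debug_tsrc fields are tied to constant zero earlier in this module; the same
// per-field aliasing is repeated below for ren1_uops_1_* and ren1_uops_2_*.
// A minimal Chisel sketch of the connection this flattens to (a hedged
// reconstruction from the rename-stage.scala:100-101 annotations, not the
// verbatim source):
//   for (w <- 0 until plWidth) {
//     ren1_fire(w) := io.dec_fire(w)
//     ren1_uops(w) := io.dec_uops(w)
//   }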
wire [15:0] ren1_uops_1_br_mask = io_dec_uops_1_br_mask_0; // @[rename-stage.scala:101:29, :356:7] wire [3:0] ren1_uops_1_br_tag = io_dec_uops_1_br_tag_0; // @[rename-stage.scala:101:29, :356:7] wire [4:0] ren1_uops_1_ftq_idx = io_dec_uops_1_ftq_idx_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_1_edge_inst = io_dec_uops_1_edge_inst_0; // @[rename-stage.scala:101:29, :356:7] wire [5:0] ren1_uops_1_pc_lob = io_dec_uops_1_pc_lob_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_1_taken = io_dec_uops_1_taken_0; // @[rename-stage.scala:101:29, :356:7] wire [19:0] ren1_uops_1_imm_packed = io_dec_uops_1_imm_packed_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_1_exception = io_dec_uops_1_exception_0; // @[rename-stage.scala:101:29, :356:7] wire [63:0] ren1_uops_1_exc_cause = io_dec_uops_1_exc_cause_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_1_bypassable = io_dec_uops_1_bypassable_0; // @[rename-stage.scala:101:29, :356:7] wire [4:0] ren1_uops_1_mem_cmd = io_dec_uops_1_mem_cmd_0; // @[rename-stage.scala:101:29, :356:7] wire [1:0] ren1_uops_1_mem_size = io_dec_uops_1_mem_size_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_1_mem_signed = io_dec_uops_1_mem_signed_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_1_is_fence = io_dec_uops_1_is_fence_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_1_is_fencei = io_dec_uops_1_is_fencei_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_1_is_amo = io_dec_uops_1_is_amo_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_1_uses_ldq = io_dec_uops_1_uses_ldq_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_1_uses_stq = io_dec_uops_1_uses_stq_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_1_is_sys_pc2epc = io_dec_uops_1_is_sys_pc2epc_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_1_is_unique = io_dec_uops_1_is_unique_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_1_flush_on_commit = io_dec_uops_1_flush_on_commit_0; // @[rename-stage.scala:101:29, :356:7] wire [5:0] ren1_uops_1_ldst = io_dec_uops_1_ldst_0; // @[rename-stage.scala:101:29, :356:7] wire [5:0] ren1_uops_1_lrs1 = io_dec_uops_1_lrs1_0; // @[rename-stage.scala:101:29, :356:7] wire [5:0] ren1_uops_1_lrs2 = io_dec_uops_1_lrs2_0; // @[rename-stage.scala:101:29, :356:7] wire [5:0] ren1_uops_1_lrs3 = io_dec_uops_1_lrs3_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_1_ldst_val = io_dec_uops_1_ldst_val_0; // @[rename-stage.scala:101:29, :356:7] wire [1:0] ren1_uops_1_dst_rtype = io_dec_uops_1_dst_rtype_0; // @[rename-stage.scala:101:29, :356:7] wire [1:0] ren1_uops_1_lrs1_rtype = io_dec_uops_1_lrs1_rtype_0; // @[rename-stage.scala:101:29, :356:7] wire [1:0] ren1_uops_1_lrs2_rtype = io_dec_uops_1_lrs2_rtype_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_1_frs3_en = io_dec_uops_1_frs3_en_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_1_fp_val = io_dec_uops_1_fp_val_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_1_fp_single = io_dec_uops_1_fp_single_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_1_xcpt_pf_if = io_dec_uops_1_xcpt_pf_if_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_1_xcpt_ae_if = io_dec_uops_1_xcpt_ae_if_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_1_bp_debug_if = io_dec_uops_1_bp_debug_if_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_1_bp_xcpt_if = io_dec_uops_1_bp_xcpt_if_0; // @[rename-stage.scala:101:29, :356:7] wire [1:0] ren1_uops_1_debug_fsrc = io_dec_uops_1_debug_fsrc_0; 
// @[rename-stage.scala:101:29, :356:7] wire [6:0] ren1_uops_2_uopc = io_dec_uops_2_uopc_0; // @[rename-stage.scala:101:29, :356:7] wire [31:0] ren1_uops_2_inst = io_dec_uops_2_inst_0; // @[rename-stage.scala:101:29, :356:7] wire [31:0] ren1_uops_2_debug_inst = io_dec_uops_2_debug_inst_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_2_is_rvc = io_dec_uops_2_is_rvc_0; // @[rename-stage.scala:101:29, :356:7] wire [39:0] ren1_uops_2_debug_pc = io_dec_uops_2_debug_pc_0; // @[rename-stage.scala:101:29, :356:7] wire [2:0] ren1_uops_2_iq_type = io_dec_uops_2_iq_type_0; // @[rename-stage.scala:101:29, :356:7] wire [9:0] ren1_uops_2_fu_code = io_dec_uops_2_fu_code_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_2_is_br = io_dec_uops_2_is_br_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_2_is_jalr = io_dec_uops_2_is_jalr_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_2_is_jal = io_dec_uops_2_is_jal_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_2_is_sfb = io_dec_uops_2_is_sfb_0; // @[rename-stage.scala:101:29, :356:7] wire [15:0] ren1_uops_2_br_mask = io_dec_uops_2_br_mask_0; // @[rename-stage.scala:101:29, :356:7] wire [3:0] ren1_uops_2_br_tag = io_dec_uops_2_br_tag_0; // @[rename-stage.scala:101:29, :356:7] wire [4:0] ren1_uops_2_ftq_idx = io_dec_uops_2_ftq_idx_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_2_edge_inst = io_dec_uops_2_edge_inst_0; // @[rename-stage.scala:101:29, :356:7] wire [5:0] ren1_uops_2_pc_lob = io_dec_uops_2_pc_lob_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_2_taken = io_dec_uops_2_taken_0; // @[rename-stage.scala:101:29, :356:7] wire [19:0] ren1_uops_2_imm_packed = io_dec_uops_2_imm_packed_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_2_exception = io_dec_uops_2_exception_0; // @[rename-stage.scala:101:29, :356:7] wire [63:0] ren1_uops_2_exc_cause = io_dec_uops_2_exc_cause_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_2_bypassable = io_dec_uops_2_bypassable_0; // @[rename-stage.scala:101:29, :356:7] wire [4:0] ren1_uops_2_mem_cmd = io_dec_uops_2_mem_cmd_0; // @[rename-stage.scala:101:29, :356:7] wire [1:0] ren1_uops_2_mem_size = io_dec_uops_2_mem_size_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_2_mem_signed = io_dec_uops_2_mem_signed_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_2_is_fence = io_dec_uops_2_is_fence_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_2_is_fencei = io_dec_uops_2_is_fencei_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_2_is_amo = io_dec_uops_2_is_amo_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_2_uses_ldq = io_dec_uops_2_uses_ldq_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_2_uses_stq = io_dec_uops_2_uses_stq_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_2_is_sys_pc2epc = io_dec_uops_2_is_sys_pc2epc_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_2_is_unique = io_dec_uops_2_is_unique_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_2_flush_on_commit = io_dec_uops_2_flush_on_commit_0; // @[rename-stage.scala:101:29, :356:7] wire [5:0] ren1_uops_2_ldst = io_dec_uops_2_ldst_0; // @[rename-stage.scala:101:29, :356:7] wire [5:0] ren1_uops_2_lrs1 = io_dec_uops_2_lrs1_0; // @[rename-stage.scala:101:29, :356:7] wire [5:0] ren1_uops_2_lrs2 = io_dec_uops_2_lrs2_0; // @[rename-stage.scala:101:29, :356:7] wire [5:0] ren1_uops_2_lrs3 = io_dec_uops_2_lrs3_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_2_ldst_val = io_dec_uops_2_ldst_val_0; // 
@[rename-stage.scala:101:29, :356:7] wire [1:0] ren1_uops_2_dst_rtype = io_dec_uops_2_dst_rtype_0; // @[rename-stage.scala:101:29, :356:7] wire [1:0] ren1_uops_2_lrs1_rtype = io_dec_uops_2_lrs1_rtype_0; // @[rename-stage.scala:101:29, :356:7] wire [1:0] ren1_uops_2_lrs2_rtype = io_dec_uops_2_lrs2_rtype_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_2_frs3_en = io_dec_uops_2_frs3_en_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_2_fp_val = io_dec_uops_2_fp_val_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_2_fp_single = io_dec_uops_2_fp_single_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_2_xcpt_pf_if = io_dec_uops_2_xcpt_pf_if_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_2_xcpt_ae_if = io_dec_uops_2_xcpt_ae_if_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_2_bp_debug_if = io_dec_uops_2_bp_debug_if_0; // @[rename-stage.scala:101:29, :356:7] wire ren1_uops_2_bp_xcpt_if = io_dec_uops_2_bp_xcpt_if_0; // @[rename-stage.scala:101:29, :356:7] wire [1:0] ren1_uops_2_debug_fsrc = io_dec_uops_2_debug_fsrc_0; // @[rename-stage.scala:101:29, :356:7] wire ren2_valids_0; // @[rename-stage.scala:107:29] wire ren2_valids_1; // @[rename-stage.scala:107:29] wire ren2_valids_2; // @[rename-stage.scala:107:29] wire [6:0] ren2_uops_0_uopc; // @[rename-stage.scala:108:29] wire [31:0] ren2_uops_0_inst; // @[rename-stage.scala:108:29] wire [31:0] ren2_uops_0_debug_inst; // @[rename-stage.scala:108:29] wire ren2_uops_0_is_rvc; // @[rename-stage.scala:108:29] wire [39:0] ren2_uops_0_debug_pc; // @[rename-stage.scala:108:29] wire [2:0] ren2_uops_0_iq_type; // @[rename-stage.scala:108:29] wire [9:0] ren2_uops_0_fu_code; // @[rename-stage.scala:108:29] wire [3:0] ren2_uops_0_ctrl_br_type; // @[rename-stage.scala:108:29] wire [1:0] ren2_uops_0_ctrl_op1_sel; // @[rename-stage.scala:108:29] wire [2:0] ren2_uops_0_ctrl_op2_sel; // @[rename-stage.scala:108:29] wire [2:0] ren2_uops_0_ctrl_imm_sel; // @[rename-stage.scala:108:29] wire [4:0] ren2_uops_0_ctrl_op_fcn; // @[rename-stage.scala:108:29] wire ren2_uops_0_ctrl_fcn_dw; // @[rename-stage.scala:108:29] wire [2:0] ren2_uops_0_ctrl_csr_cmd; // @[rename-stage.scala:108:29] wire ren2_uops_0_ctrl_is_load; // @[rename-stage.scala:108:29] wire ren2_uops_0_ctrl_is_sta; // @[rename-stage.scala:108:29] wire ren2_uops_0_ctrl_is_std; // @[rename-stage.scala:108:29] wire [1:0] ren2_uops_0_iw_state; // @[rename-stage.scala:108:29] wire ren2_uops_0_iw_p1_poisoned; // @[rename-stage.scala:108:29] wire ren2_uops_0_iw_p2_poisoned; // @[rename-stage.scala:108:29] wire ren2_uops_0_is_br; // @[rename-stage.scala:108:29] wire ren2_uops_0_is_jalr; // @[rename-stage.scala:108:29] wire ren2_uops_0_is_jal; // @[rename-stage.scala:108:29] wire ren2_uops_0_is_sfb; // @[rename-stage.scala:108:29] wire [15:0] ren2_uops_0_br_mask; // @[rename-stage.scala:108:29] wire [3:0] ren2_uops_0_br_tag; // @[rename-stage.scala:108:29] wire [4:0] ren2_uops_0_ftq_idx; // @[rename-stage.scala:108:29] wire ren2_uops_0_edge_inst; // @[rename-stage.scala:108:29] wire [5:0] ren2_uops_0_pc_lob; // @[rename-stage.scala:108:29] wire ren2_uops_0_taken; // @[rename-stage.scala:108:29] wire [19:0] ren2_uops_0_imm_packed; // @[rename-stage.scala:108:29] wire [11:0] ren2_uops_0_csr_addr; // @[rename-stage.scala:108:29] wire [6:0] ren2_uops_0_rob_idx; // @[rename-stage.scala:108:29] wire [4:0] ren2_uops_0_ldq_idx; // @[rename-stage.scala:108:29] wire [4:0] ren2_uops_0_stq_idx; // @[rename-stage.scala:108:29] wire [1:0] ren2_uops_0_rxq_idx; // 
@[rename-stage.scala:108:29] wire [6:0] ren2_uops_0_pdst; // @[rename-stage.scala:108:29] wire [6:0] ren2_uops_0_prs1; // @[rename-stage.scala:108:29] wire [6:0] ren2_uops_0_prs2; // @[rename-stage.scala:108:29] wire [6:0] ren2_uops_0_prs3; // @[rename-stage.scala:108:29] wire [4:0] ren2_uops_0_ppred; // @[rename-stage.scala:108:29] wire ren2_uops_0_prs1_busy; // @[rename-stage.scala:108:29] wire ren2_uops_0_prs2_busy; // @[rename-stage.scala:108:29] wire ren2_uops_0_prs3_busy; // @[rename-stage.scala:108:29] wire ren2_uops_0_ppred_busy; // @[rename-stage.scala:108:29] wire [6:0] ren2_uops_0_stale_pdst; // @[rename-stage.scala:108:29] wire ren2_uops_0_exception; // @[rename-stage.scala:108:29] wire [63:0] ren2_uops_0_exc_cause; // @[rename-stage.scala:108:29] wire ren2_uops_0_bypassable; // @[rename-stage.scala:108:29] wire [4:0] ren2_uops_0_mem_cmd; // @[rename-stage.scala:108:29] wire [1:0] ren2_uops_0_mem_size; // @[rename-stage.scala:108:29] wire ren2_uops_0_mem_signed; // @[rename-stage.scala:108:29] wire ren2_uops_0_is_fence; // @[rename-stage.scala:108:29] wire ren2_uops_0_is_fencei; // @[rename-stage.scala:108:29] wire ren2_uops_0_is_amo; // @[rename-stage.scala:108:29] wire ren2_uops_0_uses_ldq; // @[rename-stage.scala:108:29] wire ren2_uops_0_uses_stq; // @[rename-stage.scala:108:29] wire ren2_uops_0_is_sys_pc2epc; // @[rename-stage.scala:108:29] wire ren2_uops_0_is_unique; // @[rename-stage.scala:108:29] wire ren2_uops_0_flush_on_commit; // @[rename-stage.scala:108:29] wire ren2_uops_0_ldst_is_rs1; // @[rename-stage.scala:108:29] wire [5:0] ren2_uops_0_ldst; // @[rename-stage.scala:108:29] wire [5:0] ren2_uops_0_lrs1; // @[rename-stage.scala:108:29] wire [5:0] ren2_uops_0_lrs2; // @[rename-stage.scala:108:29] wire [5:0] ren2_uops_0_lrs3; // @[rename-stage.scala:108:29] wire ren2_uops_0_ldst_val; // @[rename-stage.scala:108:29] wire [1:0] ren2_uops_0_dst_rtype; // @[rename-stage.scala:108:29] wire [1:0] ren2_uops_0_lrs1_rtype; // @[rename-stage.scala:108:29] wire [1:0] ren2_uops_0_lrs2_rtype; // @[rename-stage.scala:108:29] wire ren2_uops_0_frs3_en; // @[rename-stage.scala:108:29] wire ren2_uops_0_fp_val; // @[rename-stage.scala:108:29] wire ren2_uops_0_fp_single; // @[rename-stage.scala:108:29] wire ren2_uops_0_xcpt_pf_if; // @[rename-stage.scala:108:29] wire ren2_uops_0_xcpt_ae_if; // @[rename-stage.scala:108:29] wire ren2_uops_0_xcpt_ma_if; // @[rename-stage.scala:108:29] wire ren2_uops_0_bp_debug_if; // @[rename-stage.scala:108:29] wire ren2_uops_0_bp_xcpt_if; // @[rename-stage.scala:108:29] wire [1:0] ren2_uops_0_debug_fsrc; // @[rename-stage.scala:108:29] wire [1:0] ren2_uops_0_debug_tsrc; // @[rename-stage.scala:108:29] wire [6:0] ren2_uops_1_uopc; // @[rename-stage.scala:108:29] wire [31:0] ren2_uops_1_inst; // @[rename-stage.scala:108:29] wire [31:0] ren2_uops_1_debug_inst; // @[rename-stage.scala:108:29] wire ren2_uops_1_is_rvc; // @[rename-stage.scala:108:29] wire [39:0] ren2_uops_1_debug_pc; // @[rename-stage.scala:108:29] wire [2:0] ren2_uops_1_iq_type; // @[rename-stage.scala:108:29] wire [9:0] ren2_uops_1_fu_code; // @[rename-stage.scala:108:29] wire [3:0] ren2_uops_1_ctrl_br_type; // @[rename-stage.scala:108:29] wire [1:0] ren2_uops_1_ctrl_op1_sel; // @[rename-stage.scala:108:29] wire [2:0] ren2_uops_1_ctrl_op2_sel; // @[rename-stage.scala:108:29] wire [2:0] ren2_uops_1_ctrl_imm_sel; // @[rename-stage.scala:108:29] wire [4:0] ren2_uops_1_ctrl_op_fcn; // @[rename-stage.scala:108:29] wire ren2_uops_1_ctrl_fcn_dw; // @[rename-stage.scala:108:29] wire [2:0] 
ren2_uops_1_ctrl_csr_cmd; // @[rename-stage.scala:108:29] wire ren2_uops_1_ctrl_is_load; // @[rename-stage.scala:108:29] wire ren2_uops_1_ctrl_is_sta; // @[rename-stage.scala:108:29] wire ren2_uops_1_ctrl_is_std; // @[rename-stage.scala:108:29] wire [1:0] ren2_uops_1_iw_state; // @[rename-stage.scala:108:29] wire ren2_uops_1_iw_p1_poisoned; // @[rename-stage.scala:108:29] wire ren2_uops_1_iw_p2_poisoned; // @[rename-stage.scala:108:29] wire ren2_uops_1_is_br; // @[rename-stage.scala:108:29] wire ren2_uops_1_is_jalr; // @[rename-stage.scala:108:29] wire ren2_uops_1_is_jal; // @[rename-stage.scala:108:29] wire ren2_uops_1_is_sfb; // @[rename-stage.scala:108:29] wire [15:0] ren2_uops_1_br_mask; // @[rename-stage.scala:108:29] wire [3:0] ren2_uops_1_br_tag; // @[rename-stage.scala:108:29] wire [4:0] ren2_uops_1_ftq_idx; // @[rename-stage.scala:108:29] wire ren2_uops_1_edge_inst; // @[rename-stage.scala:108:29] wire [5:0] ren2_uops_1_pc_lob; // @[rename-stage.scala:108:29] wire ren2_uops_1_taken; // @[rename-stage.scala:108:29] wire [19:0] ren2_uops_1_imm_packed; // @[rename-stage.scala:108:29] wire [11:0] ren2_uops_1_csr_addr; // @[rename-stage.scala:108:29] wire [6:0] ren2_uops_1_rob_idx; // @[rename-stage.scala:108:29] wire [4:0] ren2_uops_1_ldq_idx; // @[rename-stage.scala:108:29] wire [4:0] ren2_uops_1_stq_idx; // @[rename-stage.scala:108:29] wire [1:0] ren2_uops_1_rxq_idx; // @[rename-stage.scala:108:29] wire [6:0] ren2_uops_1_pdst; // @[rename-stage.scala:108:29] wire [6:0] ren2_uops_1_prs1; // @[rename-stage.scala:108:29] wire [6:0] ren2_uops_1_prs2; // @[rename-stage.scala:108:29] wire [6:0] ren2_uops_1_prs3; // @[rename-stage.scala:108:29] wire [4:0] ren2_uops_1_ppred; // @[rename-stage.scala:108:29] wire ren2_uops_1_prs1_busy; // @[rename-stage.scala:108:29] wire ren2_uops_1_prs2_busy; // @[rename-stage.scala:108:29] wire ren2_uops_1_prs3_busy; // @[rename-stage.scala:108:29] wire ren2_uops_1_ppred_busy; // @[rename-stage.scala:108:29] wire [6:0] ren2_uops_1_stale_pdst; // @[rename-stage.scala:108:29] wire ren2_uops_1_exception; // @[rename-stage.scala:108:29] wire [63:0] ren2_uops_1_exc_cause; // @[rename-stage.scala:108:29] wire ren2_uops_1_bypassable; // @[rename-stage.scala:108:29] wire [4:0] ren2_uops_1_mem_cmd; // @[rename-stage.scala:108:29] wire [1:0] ren2_uops_1_mem_size; // @[rename-stage.scala:108:29] wire ren2_uops_1_mem_signed; // @[rename-stage.scala:108:29] wire ren2_uops_1_is_fence; // @[rename-stage.scala:108:29] wire ren2_uops_1_is_fencei; // @[rename-stage.scala:108:29] wire ren2_uops_1_is_amo; // @[rename-stage.scala:108:29] wire ren2_uops_1_uses_ldq; // @[rename-stage.scala:108:29] wire ren2_uops_1_uses_stq; // @[rename-stage.scala:108:29] wire ren2_uops_1_is_sys_pc2epc; // @[rename-stage.scala:108:29] wire ren2_uops_1_is_unique; // @[rename-stage.scala:108:29] wire ren2_uops_1_flush_on_commit; // @[rename-stage.scala:108:29] wire ren2_uops_1_ldst_is_rs1; // @[rename-stage.scala:108:29] wire [5:0] ren2_uops_1_ldst; // @[rename-stage.scala:108:29] wire [5:0] ren2_uops_1_lrs1; // @[rename-stage.scala:108:29] wire [5:0] ren2_uops_1_lrs2; // @[rename-stage.scala:108:29] wire [5:0] ren2_uops_1_lrs3; // @[rename-stage.scala:108:29] wire ren2_uops_1_ldst_val; // @[rename-stage.scala:108:29] wire [1:0] ren2_uops_1_dst_rtype; // @[rename-stage.scala:108:29] wire [1:0] ren2_uops_1_lrs1_rtype; // @[rename-stage.scala:108:29] wire [1:0] ren2_uops_1_lrs2_rtype; // @[rename-stage.scala:108:29] wire ren2_uops_1_frs3_en; // @[rename-stage.scala:108:29] wire ren2_uops_1_fp_val; 
// @[rename-stage.scala:108:29] wire ren2_uops_1_fp_single; // @[rename-stage.scala:108:29] wire ren2_uops_1_xcpt_pf_if; // @[rename-stage.scala:108:29] wire ren2_uops_1_xcpt_ae_if; // @[rename-stage.scala:108:29] wire ren2_uops_1_xcpt_ma_if; // @[rename-stage.scala:108:29] wire ren2_uops_1_bp_debug_if; // @[rename-stage.scala:108:29] wire ren2_uops_1_bp_xcpt_if; // @[rename-stage.scala:108:29] wire [1:0] ren2_uops_1_debug_fsrc; // @[rename-stage.scala:108:29] wire [1:0] ren2_uops_1_debug_tsrc; // @[rename-stage.scala:108:29] wire [6:0] ren2_uops_2_uopc; // @[rename-stage.scala:108:29] wire [31:0] ren2_uops_2_inst; // @[rename-stage.scala:108:29] wire [31:0] ren2_uops_2_debug_inst; // @[rename-stage.scala:108:29] wire ren2_uops_2_is_rvc; // @[rename-stage.scala:108:29] wire [39:0] ren2_uops_2_debug_pc; // @[rename-stage.scala:108:29] wire [2:0] ren2_uops_2_iq_type; // @[rename-stage.scala:108:29] wire [9:0] ren2_uops_2_fu_code; // @[rename-stage.scala:108:29] wire [3:0] ren2_uops_2_ctrl_br_type; // @[rename-stage.scala:108:29] wire [1:0] ren2_uops_2_ctrl_op1_sel; // @[rename-stage.scala:108:29] wire [2:0] ren2_uops_2_ctrl_op2_sel; // @[rename-stage.scala:108:29] wire [2:0] ren2_uops_2_ctrl_imm_sel; // @[rename-stage.scala:108:29] wire [4:0] ren2_uops_2_ctrl_op_fcn; // @[rename-stage.scala:108:29] wire ren2_uops_2_ctrl_fcn_dw; // @[rename-stage.scala:108:29] wire [2:0] ren2_uops_2_ctrl_csr_cmd; // @[rename-stage.scala:108:29] wire ren2_uops_2_ctrl_is_load; // @[rename-stage.scala:108:29] wire ren2_uops_2_ctrl_is_sta; // @[rename-stage.scala:108:29] wire ren2_uops_2_ctrl_is_std; // @[rename-stage.scala:108:29] wire [1:0] ren2_uops_2_iw_state; // @[rename-stage.scala:108:29] wire ren2_uops_2_iw_p1_poisoned; // @[rename-stage.scala:108:29] wire ren2_uops_2_iw_p2_poisoned; // @[rename-stage.scala:108:29] wire ren2_uops_2_is_br; // @[rename-stage.scala:108:29] wire ren2_uops_2_is_jalr; // @[rename-stage.scala:108:29] wire ren2_uops_2_is_jal; // @[rename-stage.scala:108:29] wire ren2_uops_2_is_sfb; // @[rename-stage.scala:108:29] wire [15:0] ren2_uops_2_br_mask; // @[rename-stage.scala:108:29] wire [3:0] ren2_uops_2_br_tag; // @[rename-stage.scala:108:29] wire [4:0] ren2_uops_2_ftq_idx; // @[rename-stage.scala:108:29] wire ren2_uops_2_edge_inst; // @[rename-stage.scala:108:29] wire [5:0] ren2_uops_2_pc_lob; // @[rename-stage.scala:108:29] wire ren2_uops_2_taken; // @[rename-stage.scala:108:29] wire [19:0] ren2_uops_2_imm_packed; // @[rename-stage.scala:108:29] wire [11:0] ren2_uops_2_csr_addr; // @[rename-stage.scala:108:29] wire [6:0] ren2_uops_2_rob_idx; // @[rename-stage.scala:108:29] wire [4:0] ren2_uops_2_ldq_idx; // @[rename-stage.scala:108:29] wire [4:0] ren2_uops_2_stq_idx; // @[rename-stage.scala:108:29] wire [1:0] ren2_uops_2_rxq_idx; // @[rename-stage.scala:108:29] wire [6:0] ren2_uops_2_pdst; // @[rename-stage.scala:108:29] wire [6:0] ren2_uops_2_prs1; // @[rename-stage.scala:108:29] wire [6:0] ren2_uops_2_prs2; // @[rename-stage.scala:108:29] wire [6:0] ren2_uops_2_prs3; // @[rename-stage.scala:108:29] wire [4:0] ren2_uops_2_ppred; // @[rename-stage.scala:108:29] wire ren2_uops_2_prs1_busy; // @[rename-stage.scala:108:29] wire ren2_uops_2_prs2_busy; // @[rename-stage.scala:108:29] wire ren2_uops_2_prs3_busy; // @[rename-stage.scala:108:29] wire ren2_uops_2_ppred_busy; // @[rename-stage.scala:108:29] wire [6:0] ren2_uops_2_stale_pdst; // @[rename-stage.scala:108:29] wire ren2_uops_2_exception; // @[rename-stage.scala:108:29] wire [63:0] ren2_uops_2_exc_cause; // 
@[rename-stage.scala:108:29] wire ren2_uops_2_bypassable; // @[rename-stage.scala:108:29] wire [4:0] ren2_uops_2_mem_cmd; // @[rename-stage.scala:108:29] wire [1:0] ren2_uops_2_mem_size; // @[rename-stage.scala:108:29] wire ren2_uops_2_mem_signed; // @[rename-stage.scala:108:29] wire ren2_uops_2_is_fence; // @[rename-stage.scala:108:29] wire ren2_uops_2_is_fencei; // @[rename-stage.scala:108:29] wire ren2_uops_2_is_amo; // @[rename-stage.scala:108:29] wire ren2_uops_2_uses_ldq; // @[rename-stage.scala:108:29] wire ren2_uops_2_uses_stq; // @[rename-stage.scala:108:29] wire ren2_uops_2_is_sys_pc2epc; // @[rename-stage.scala:108:29] wire ren2_uops_2_is_unique; // @[rename-stage.scala:108:29] wire ren2_uops_2_flush_on_commit; // @[rename-stage.scala:108:29] wire ren2_uops_2_ldst_is_rs1; // @[rename-stage.scala:108:29] wire [5:0] ren2_uops_2_ldst; // @[rename-stage.scala:108:29] wire [5:0] ren2_uops_2_lrs1; // @[rename-stage.scala:108:29] wire [5:0] ren2_uops_2_lrs2; // @[rename-stage.scala:108:29] wire [5:0] ren2_uops_2_lrs3; // @[rename-stage.scala:108:29] wire ren2_uops_2_ldst_val; // @[rename-stage.scala:108:29] wire [1:0] ren2_uops_2_dst_rtype; // @[rename-stage.scala:108:29] wire [1:0] ren2_uops_2_lrs1_rtype; // @[rename-stage.scala:108:29] wire [1:0] ren2_uops_2_lrs2_rtype; // @[rename-stage.scala:108:29] wire ren2_uops_2_frs3_en; // @[rename-stage.scala:108:29] wire ren2_uops_2_fp_val; // @[rename-stage.scala:108:29] wire ren2_uops_2_fp_single; // @[rename-stage.scala:108:29] wire ren2_uops_2_xcpt_pf_if; // @[rename-stage.scala:108:29] wire ren2_uops_2_xcpt_ae_if; // @[rename-stage.scala:108:29] wire ren2_uops_2_xcpt_ma_if; // @[rename-stage.scala:108:29] wire ren2_uops_2_bp_debug_if; // @[rename-stage.scala:108:29] wire ren2_uops_2_bp_xcpt_if; // @[rename-stage.scala:108:29] wire [1:0] ren2_uops_2_debug_fsrc; // @[rename-stage.scala:108:29] wire [1:0] ren2_uops_2_debug_tsrc; // @[rename-stage.scala:108:29] wire io_ren2_mask_0; // @[rename-stage.scala:356:7] wire io_ren2_mask_1; // @[rename-stage.scala:356:7] wire io_ren2_mask_2; // @[rename-stage.scala:356:7] wire [3:0] io_ren2_uops_0_ctrl_br_type; // @[rename-stage.scala:356:7] wire [1:0] io_ren2_uops_0_ctrl_op1_sel; // @[rename-stage.scala:356:7] wire [2:0] io_ren2_uops_0_ctrl_op2_sel; // @[rename-stage.scala:356:7] wire [2:0] io_ren2_uops_0_ctrl_imm_sel; // @[rename-stage.scala:356:7] wire [4:0] io_ren2_uops_0_ctrl_op_fcn; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_ctrl_fcn_dw; // @[rename-stage.scala:356:7] wire [2:0] io_ren2_uops_0_ctrl_csr_cmd; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_ctrl_is_load; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_ctrl_is_sta; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_ctrl_is_std; // @[rename-stage.scala:356:7] wire [6:0] io_ren2_uops_0_uopc; // @[rename-stage.scala:356:7] wire [31:0] io_ren2_uops_0_inst; // @[rename-stage.scala:356:7] wire [31:0] io_ren2_uops_0_debug_inst; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_is_rvc; // @[rename-stage.scala:356:7] wire [39:0] io_ren2_uops_0_debug_pc; // @[rename-stage.scala:356:7] wire [2:0] io_ren2_uops_0_iq_type; // @[rename-stage.scala:356:7] wire [9:0] io_ren2_uops_0_fu_code; // @[rename-stage.scala:356:7] wire [1:0] io_ren2_uops_0_iw_state; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_iw_p1_poisoned; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_iw_p2_poisoned; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_is_br; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_is_jalr; // 
@[rename-stage.scala:356:7] wire io_ren2_uops_0_is_jal; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_is_sfb; // @[rename-stage.scala:356:7] wire [15:0] io_ren2_uops_0_br_mask; // @[rename-stage.scala:356:7] wire [3:0] io_ren2_uops_0_br_tag; // @[rename-stage.scala:356:7] wire [4:0] io_ren2_uops_0_ftq_idx; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_edge_inst; // @[rename-stage.scala:356:7] wire [5:0] io_ren2_uops_0_pc_lob; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_taken; // @[rename-stage.scala:356:7] wire [19:0] io_ren2_uops_0_imm_packed; // @[rename-stage.scala:356:7] wire [11:0] io_ren2_uops_0_csr_addr; // @[rename-stage.scala:356:7] wire [6:0] io_ren2_uops_0_rob_idx; // @[rename-stage.scala:356:7] wire [4:0] io_ren2_uops_0_ldq_idx; // @[rename-stage.scala:356:7] wire [4:0] io_ren2_uops_0_stq_idx; // @[rename-stage.scala:356:7] wire [1:0] io_ren2_uops_0_rxq_idx; // @[rename-stage.scala:356:7] wire [6:0] io_ren2_uops_0_pdst; // @[rename-stage.scala:356:7] wire [6:0] io_ren2_uops_0_prs1; // @[rename-stage.scala:356:7] wire [6:0] io_ren2_uops_0_prs2; // @[rename-stage.scala:356:7] wire [6:0] io_ren2_uops_0_prs3; // @[rename-stage.scala:356:7] wire [4:0] io_ren2_uops_0_ppred; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_prs1_busy; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_prs2_busy; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_prs3_busy; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_ppred_busy; // @[rename-stage.scala:356:7] wire [6:0] io_ren2_uops_0_stale_pdst; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_exception; // @[rename-stage.scala:356:7] wire [63:0] io_ren2_uops_0_exc_cause; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_bypassable; // @[rename-stage.scala:356:7] wire [4:0] io_ren2_uops_0_mem_cmd; // @[rename-stage.scala:356:7] wire [1:0] io_ren2_uops_0_mem_size; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_mem_signed; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_is_fence; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_is_fencei; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_is_amo; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_uses_ldq; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_uses_stq; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_is_sys_pc2epc; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_is_unique; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_flush_on_commit; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_ldst_is_rs1; // @[rename-stage.scala:356:7] wire [5:0] io_ren2_uops_0_ldst; // @[rename-stage.scala:356:7] wire [5:0] io_ren2_uops_0_lrs1; // @[rename-stage.scala:356:7] wire [5:0] io_ren2_uops_0_lrs2; // @[rename-stage.scala:356:7] wire [5:0] io_ren2_uops_0_lrs3; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_ldst_val; // @[rename-stage.scala:356:7] wire [1:0] io_ren2_uops_0_dst_rtype; // @[rename-stage.scala:356:7] wire [1:0] io_ren2_uops_0_lrs1_rtype; // @[rename-stage.scala:356:7] wire [1:0] io_ren2_uops_0_lrs2_rtype; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_frs3_en; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_fp_val; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_fp_single; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_xcpt_pf_if; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_xcpt_ae_if; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_xcpt_ma_if; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_bp_debug_if; // @[rename-stage.scala:356:7] wire io_ren2_uops_0_bp_xcpt_if; // @[rename-stage.scala:356:7] wire [1:0] 
io_ren2_uops_0_debug_fsrc; // @[rename-stage.scala:356:7] wire [1:0] io_ren2_uops_0_debug_tsrc; // @[rename-stage.scala:356:7] wire [3:0] io_ren2_uops_1_ctrl_br_type; // @[rename-stage.scala:356:7] wire [1:0] io_ren2_uops_1_ctrl_op1_sel; // @[rename-stage.scala:356:7] wire [2:0] io_ren2_uops_1_ctrl_op2_sel; // @[rename-stage.scala:356:7] wire [2:0] io_ren2_uops_1_ctrl_imm_sel; // @[rename-stage.scala:356:7] wire [4:0] io_ren2_uops_1_ctrl_op_fcn; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_ctrl_fcn_dw; // @[rename-stage.scala:356:7] wire [2:0] io_ren2_uops_1_ctrl_csr_cmd; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_ctrl_is_load; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_ctrl_is_sta; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_ctrl_is_std; // @[rename-stage.scala:356:7] wire [6:0] io_ren2_uops_1_uopc; // @[rename-stage.scala:356:7] wire [31:0] io_ren2_uops_1_inst; // @[rename-stage.scala:356:7] wire [31:0] io_ren2_uops_1_debug_inst; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_is_rvc; // @[rename-stage.scala:356:7] wire [39:0] io_ren2_uops_1_debug_pc; // @[rename-stage.scala:356:7] wire [2:0] io_ren2_uops_1_iq_type; // @[rename-stage.scala:356:7] wire [9:0] io_ren2_uops_1_fu_code; // @[rename-stage.scala:356:7] wire [1:0] io_ren2_uops_1_iw_state; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_iw_p1_poisoned; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_iw_p2_poisoned; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_is_br; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_is_jalr; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_is_jal; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_is_sfb; // @[rename-stage.scala:356:7] wire [15:0] io_ren2_uops_1_br_mask; // @[rename-stage.scala:356:7] wire [3:0] io_ren2_uops_1_br_tag; // @[rename-stage.scala:356:7] wire [4:0] io_ren2_uops_1_ftq_idx; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_edge_inst; // @[rename-stage.scala:356:7] wire [5:0] io_ren2_uops_1_pc_lob; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_taken; // @[rename-stage.scala:356:7] wire [19:0] io_ren2_uops_1_imm_packed; // @[rename-stage.scala:356:7] wire [11:0] io_ren2_uops_1_csr_addr; // @[rename-stage.scala:356:7] wire [6:0] io_ren2_uops_1_rob_idx; // @[rename-stage.scala:356:7] wire [4:0] io_ren2_uops_1_ldq_idx; // @[rename-stage.scala:356:7] wire [4:0] io_ren2_uops_1_stq_idx; // @[rename-stage.scala:356:7] wire [1:0] io_ren2_uops_1_rxq_idx; // @[rename-stage.scala:356:7] wire [6:0] io_ren2_uops_1_pdst; // @[rename-stage.scala:356:7] wire [6:0] io_ren2_uops_1_prs1; // @[rename-stage.scala:356:7] wire [6:0] io_ren2_uops_1_prs2; // @[rename-stage.scala:356:7] wire [6:0] io_ren2_uops_1_prs3; // @[rename-stage.scala:356:7] wire [4:0] io_ren2_uops_1_ppred; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_prs1_busy; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_prs2_busy; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_prs3_busy; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_ppred_busy; // @[rename-stage.scala:356:7] wire [6:0] io_ren2_uops_1_stale_pdst; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_exception; // @[rename-stage.scala:356:7] wire [63:0] io_ren2_uops_1_exc_cause; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_bypassable; // @[rename-stage.scala:356:7] wire [4:0] io_ren2_uops_1_mem_cmd; // @[rename-stage.scala:356:7] wire [1:0] io_ren2_uops_1_mem_size; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_mem_signed; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_is_fence; // 
@[rename-stage.scala:356:7] wire io_ren2_uops_1_is_fencei; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_is_amo; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_uses_ldq; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_uses_stq; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_is_sys_pc2epc; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_is_unique; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_flush_on_commit; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_ldst_is_rs1; // @[rename-stage.scala:356:7] wire [5:0] io_ren2_uops_1_ldst; // @[rename-stage.scala:356:7] wire [5:0] io_ren2_uops_1_lrs1; // @[rename-stage.scala:356:7] wire [5:0] io_ren2_uops_1_lrs2; // @[rename-stage.scala:356:7] wire [5:0] io_ren2_uops_1_lrs3; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_ldst_val; // @[rename-stage.scala:356:7] wire [1:0] io_ren2_uops_1_dst_rtype; // @[rename-stage.scala:356:7] wire [1:0] io_ren2_uops_1_lrs1_rtype; // @[rename-stage.scala:356:7] wire [1:0] io_ren2_uops_1_lrs2_rtype; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_frs3_en; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_fp_val; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_fp_single; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_xcpt_pf_if; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_xcpt_ae_if; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_xcpt_ma_if; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_bp_debug_if; // @[rename-stage.scala:356:7] wire io_ren2_uops_1_bp_xcpt_if; // @[rename-stage.scala:356:7] wire [1:0] io_ren2_uops_1_debug_fsrc; // @[rename-stage.scala:356:7] wire [1:0] io_ren2_uops_1_debug_tsrc; // @[rename-stage.scala:356:7] wire [3:0] io_ren2_uops_2_ctrl_br_type; // @[rename-stage.scala:356:7] wire [1:0] io_ren2_uops_2_ctrl_op1_sel; // @[rename-stage.scala:356:7] wire [2:0] io_ren2_uops_2_ctrl_op2_sel; // @[rename-stage.scala:356:7] wire [2:0] io_ren2_uops_2_ctrl_imm_sel; // @[rename-stage.scala:356:7] wire [4:0] io_ren2_uops_2_ctrl_op_fcn; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_ctrl_fcn_dw; // @[rename-stage.scala:356:7] wire [2:0] io_ren2_uops_2_ctrl_csr_cmd; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_ctrl_is_load; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_ctrl_is_sta; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_ctrl_is_std; // @[rename-stage.scala:356:7] wire [6:0] io_ren2_uops_2_uopc; // @[rename-stage.scala:356:7] wire [31:0] io_ren2_uops_2_inst; // @[rename-stage.scala:356:7] wire [31:0] io_ren2_uops_2_debug_inst; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_is_rvc; // @[rename-stage.scala:356:7] wire [39:0] io_ren2_uops_2_debug_pc; // @[rename-stage.scala:356:7] wire [2:0] io_ren2_uops_2_iq_type; // @[rename-stage.scala:356:7] wire [9:0] io_ren2_uops_2_fu_code; // @[rename-stage.scala:356:7] wire [1:0] io_ren2_uops_2_iw_state; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_iw_p1_poisoned; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_iw_p2_poisoned; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_is_br; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_is_jalr; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_is_jal; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_is_sfb; // @[rename-stage.scala:356:7] wire [15:0] io_ren2_uops_2_br_mask; // @[rename-stage.scala:356:7] wire [3:0] io_ren2_uops_2_br_tag; // @[rename-stage.scala:356:7] wire [4:0] io_ren2_uops_2_ftq_idx; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_edge_inst; // @[rename-stage.scala:356:7] wire [5:0] io_ren2_uops_2_pc_lob; 
// @[rename-stage.scala:356:7] wire io_ren2_uops_2_taken; // @[rename-stage.scala:356:7] wire [19:0] io_ren2_uops_2_imm_packed; // @[rename-stage.scala:356:7] wire [11:0] io_ren2_uops_2_csr_addr; // @[rename-stage.scala:356:7] wire [6:0] io_ren2_uops_2_rob_idx; // @[rename-stage.scala:356:7] wire [4:0] io_ren2_uops_2_ldq_idx; // @[rename-stage.scala:356:7] wire [4:0] io_ren2_uops_2_stq_idx; // @[rename-stage.scala:356:7] wire [1:0] io_ren2_uops_2_rxq_idx; // @[rename-stage.scala:356:7] wire [6:0] io_ren2_uops_2_pdst; // @[rename-stage.scala:356:7] wire [6:0] io_ren2_uops_2_prs1; // @[rename-stage.scala:356:7] wire [6:0] io_ren2_uops_2_prs2; // @[rename-stage.scala:356:7] wire [6:0] io_ren2_uops_2_prs3; // @[rename-stage.scala:356:7] wire [4:0] io_ren2_uops_2_ppred; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_prs1_busy; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_prs2_busy; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_prs3_busy; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_ppred_busy; // @[rename-stage.scala:356:7] wire [6:0] io_ren2_uops_2_stale_pdst; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_exception; // @[rename-stage.scala:356:7] wire [63:0] io_ren2_uops_2_exc_cause; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_bypassable; // @[rename-stage.scala:356:7] wire [4:0] io_ren2_uops_2_mem_cmd; // @[rename-stage.scala:356:7] wire [1:0] io_ren2_uops_2_mem_size; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_mem_signed; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_is_fence; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_is_fencei; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_is_amo; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_uses_ldq; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_uses_stq; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_is_sys_pc2epc; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_is_unique; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_flush_on_commit; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_ldst_is_rs1; // @[rename-stage.scala:356:7] wire [5:0] io_ren2_uops_2_ldst; // @[rename-stage.scala:356:7] wire [5:0] io_ren2_uops_2_lrs1; // @[rename-stage.scala:356:7] wire [5:0] io_ren2_uops_2_lrs2; // @[rename-stage.scala:356:7] wire [5:0] io_ren2_uops_2_lrs3; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_ldst_val; // @[rename-stage.scala:356:7] wire [1:0] io_ren2_uops_2_dst_rtype; // @[rename-stage.scala:356:7] wire [1:0] io_ren2_uops_2_lrs1_rtype; // @[rename-stage.scala:356:7] wire [1:0] io_ren2_uops_2_lrs2_rtype; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_frs3_en; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_fp_val; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_fp_single; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_xcpt_pf_if; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_xcpt_ae_if; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_xcpt_ma_if; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_bp_debug_if; // @[rename-stage.scala:356:7] wire io_ren2_uops_2_bp_xcpt_if; // @[rename-stage.scala:356:7] wire [1:0] io_ren2_uops_2_debug_fsrc; // @[rename-stage.scala:356:7] wire [1:0] io_ren2_uops_2_debug_tsrc; // @[rename-stage.scala:356:7] assign io_ren2_mask_0 = ren2_valids_0; // @[rename-stage.scala:107:29, :356:7] assign io_ren2_mask_1 = ren2_valids_1; // @[rename-stage.scala:107:29, :356:7] assign io_ren2_mask_2 = ren2_valids_2; // @[rename-stage.scala:107:29, :356:7] assign io_ren2_uops_0_uopc = ren2_uops_0_uopc; // @[rename-stage.scala:108:29, 
:356:7] assign io_ren2_uops_0_inst = ren2_uops_0_inst; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_debug_inst = ren2_uops_0_debug_inst; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_is_rvc = ren2_uops_0_is_rvc; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_debug_pc = ren2_uops_0_debug_pc; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_iq_type = ren2_uops_0_iq_type; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_fu_code = ren2_uops_0_fu_code; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_ctrl_br_type = ren2_uops_0_ctrl_br_type; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_ctrl_op1_sel = ren2_uops_0_ctrl_op1_sel; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_ctrl_op2_sel = ren2_uops_0_ctrl_op2_sel; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_ctrl_imm_sel = ren2_uops_0_ctrl_imm_sel; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_ctrl_op_fcn = ren2_uops_0_ctrl_op_fcn; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_ctrl_fcn_dw = ren2_uops_0_ctrl_fcn_dw; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_ctrl_csr_cmd = ren2_uops_0_ctrl_csr_cmd; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_ctrl_is_load = ren2_uops_0_ctrl_is_load; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_ctrl_is_sta = ren2_uops_0_ctrl_is_sta; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_ctrl_is_std = ren2_uops_0_ctrl_is_std; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_iw_state = ren2_uops_0_iw_state; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_iw_p1_poisoned = ren2_uops_0_iw_p1_poisoned; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_iw_p2_poisoned = ren2_uops_0_iw_p2_poisoned; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_is_br = ren2_uops_0_is_br; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_is_jalr = ren2_uops_0_is_jalr; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_is_jal = ren2_uops_0_is_jal; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_is_sfb = ren2_uops_0_is_sfb; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_br_mask = ren2_uops_0_br_mask; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_br_tag = ren2_uops_0_br_tag; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_ftq_idx = ren2_uops_0_ftq_idx; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_edge_inst = ren2_uops_0_edge_inst; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_pc_lob = ren2_uops_0_pc_lob; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_taken = ren2_uops_0_taken; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_imm_packed = ren2_uops_0_imm_packed; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_csr_addr = ren2_uops_0_csr_addr; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_rob_idx = ren2_uops_0_rob_idx; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_ldq_idx = ren2_uops_0_ldq_idx; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_stq_idx = ren2_uops_0_stq_idx; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_rxq_idx = ren2_uops_0_rxq_idx; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_pdst = ren2_uops_0_pdst; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_prs1 = ren2_uops_0_prs1; // @[rename-stage.scala:108:29, :356:7] 
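// Note: this run of assigns fans the internal ren2_valids / ren2_uops_0 wires
// (annotated rename-stage.scala:107-108) out to the flattened io_ren2_mask_* /
// io_ren2_uops_0_* output ports, one assign per MicroOp field; the same
// pattern repeats below for lanes 1 and 2. A hedged Chisel sketch of the
// equivalent bundle connection (inferred from these assigns, not the verbatim
// source):
//   io.ren2_mask := ren2_valids
//   io.ren2_uops := ren2_uops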
assign io_ren2_uops_0_prs2 = ren2_uops_0_prs2; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_prs3 = ren2_uops_0_prs3; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_ppred = ren2_uops_0_ppred; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_prs1_busy = ren2_uops_0_prs1_busy; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_prs2_busy = ren2_uops_0_prs2_busy; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_prs3_busy = ren2_uops_0_prs3_busy; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_ppred_busy = ren2_uops_0_ppred_busy; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_stale_pdst = ren2_uops_0_stale_pdst; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_exception = ren2_uops_0_exception; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_exc_cause = ren2_uops_0_exc_cause; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_bypassable = ren2_uops_0_bypassable; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_mem_cmd = ren2_uops_0_mem_cmd; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_mem_size = ren2_uops_0_mem_size; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_mem_signed = ren2_uops_0_mem_signed; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_is_fence = ren2_uops_0_is_fence; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_is_fencei = ren2_uops_0_is_fencei; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_is_amo = ren2_uops_0_is_amo; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_uses_ldq = ren2_uops_0_uses_ldq; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_uses_stq = ren2_uops_0_uses_stq; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_is_sys_pc2epc = ren2_uops_0_is_sys_pc2epc; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_is_unique = ren2_uops_0_is_unique; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_flush_on_commit = ren2_uops_0_flush_on_commit; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_ldst_is_rs1 = ren2_uops_0_ldst_is_rs1; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_ldst = ren2_uops_0_ldst; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_lrs1 = ren2_uops_0_lrs1; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_lrs2 = ren2_uops_0_lrs2; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_lrs3 = ren2_uops_0_lrs3; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_ldst_val = ren2_uops_0_ldst_val; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_dst_rtype = ren2_uops_0_dst_rtype; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_lrs1_rtype = ren2_uops_0_lrs1_rtype; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_lrs2_rtype = ren2_uops_0_lrs2_rtype; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_frs3_en = ren2_uops_0_frs3_en; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_fp_val = ren2_uops_0_fp_val; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_fp_single = ren2_uops_0_fp_single; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_xcpt_pf_if = ren2_uops_0_xcpt_pf_if; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_xcpt_ae_if = ren2_uops_0_xcpt_ae_if; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_xcpt_ma_if = ren2_uops_0_xcpt_ma_if; // @[rename-stage.scala:108:29, :356:7] assign 
io_ren2_uops_0_bp_debug_if = ren2_uops_0_bp_debug_if; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_bp_xcpt_if = ren2_uops_0_bp_xcpt_if; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_debug_fsrc = ren2_uops_0_debug_fsrc; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_0_debug_tsrc = ren2_uops_0_debug_tsrc; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_uopc = ren2_uops_1_uopc; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_inst = ren2_uops_1_inst; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_debug_inst = ren2_uops_1_debug_inst; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_is_rvc = ren2_uops_1_is_rvc; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_debug_pc = ren2_uops_1_debug_pc; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_iq_type = ren2_uops_1_iq_type; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_fu_code = ren2_uops_1_fu_code; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_ctrl_br_type = ren2_uops_1_ctrl_br_type; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_ctrl_op1_sel = ren2_uops_1_ctrl_op1_sel; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_ctrl_op2_sel = ren2_uops_1_ctrl_op2_sel; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_ctrl_imm_sel = ren2_uops_1_ctrl_imm_sel; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_ctrl_op_fcn = ren2_uops_1_ctrl_op_fcn; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_ctrl_fcn_dw = ren2_uops_1_ctrl_fcn_dw; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_ctrl_csr_cmd = ren2_uops_1_ctrl_csr_cmd; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_ctrl_is_load = ren2_uops_1_ctrl_is_load; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_ctrl_is_sta = ren2_uops_1_ctrl_is_sta; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_ctrl_is_std = ren2_uops_1_ctrl_is_std; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_iw_state = ren2_uops_1_iw_state; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_iw_p1_poisoned = ren2_uops_1_iw_p1_poisoned; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_iw_p2_poisoned = ren2_uops_1_iw_p2_poisoned; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_is_br = ren2_uops_1_is_br; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_is_jalr = ren2_uops_1_is_jalr; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_is_jal = ren2_uops_1_is_jal; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_is_sfb = ren2_uops_1_is_sfb; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_br_mask = ren2_uops_1_br_mask; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_br_tag = ren2_uops_1_br_tag; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_ftq_idx = ren2_uops_1_ftq_idx; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_edge_inst = ren2_uops_1_edge_inst; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_pc_lob = ren2_uops_1_pc_lob; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_taken = ren2_uops_1_taken; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_imm_packed = ren2_uops_1_imm_packed; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_csr_addr = ren2_uops_1_csr_addr; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_rob_idx = ren2_uops_1_rob_idx; // 
@[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_ldq_idx = ren2_uops_1_ldq_idx; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_stq_idx = ren2_uops_1_stq_idx; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_rxq_idx = ren2_uops_1_rxq_idx; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_pdst = ren2_uops_1_pdst; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_prs1 = ren2_uops_1_prs1; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_prs2 = ren2_uops_1_prs2; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_prs3 = ren2_uops_1_prs3; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_ppred = ren2_uops_1_ppred; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_prs1_busy = ren2_uops_1_prs1_busy; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_prs2_busy = ren2_uops_1_prs2_busy; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_prs3_busy = ren2_uops_1_prs3_busy; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_ppred_busy = ren2_uops_1_ppred_busy; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_stale_pdst = ren2_uops_1_stale_pdst; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_exception = ren2_uops_1_exception; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_exc_cause = ren2_uops_1_exc_cause; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_bypassable = ren2_uops_1_bypassable; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_mem_cmd = ren2_uops_1_mem_cmd; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_mem_size = ren2_uops_1_mem_size; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_mem_signed = ren2_uops_1_mem_signed; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_is_fence = ren2_uops_1_is_fence; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_is_fencei = ren2_uops_1_is_fencei; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_is_amo = ren2_uops_1_is_amo; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_uses_ldq = ren2_uops_1_uses_ldq; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_uses_stq = ren2_uops_1_uses_stq; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_is_sys_pc2epc = ren2_uops_1_is_sys_pc2epc; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_is_unique = ren2_uops_1_is_unique; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_flush_on_commit = ren2_uops_1_flush_on_commit; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_ldst_is_rs1 = ren2_uops_1_ldst_is_rs1; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_ldst = ren2_uops_1_ldst; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_lrs1 = ren2_uops_1_lrs1; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_lrs2 = ren2_uops_1_lrs2; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_lrs3 = ren2_uops_1_lrs3; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_ldst_val = ren2_uops_1_ldst_val; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_dst_rtype = ren2_uops_1_dst_rtype; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_lrs1_rtype = ren2_uops_1_lrs1_rtype; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_lrs2_rtype = ren2_uops_1_lrs2_rtype; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_frs3_en = ren2_uops_1_frs3_en; // @[rename-stage.scala:108:29, :356:7] assign 
io_ren2_uops_1_fp_val = ren2_uops_1_fp_val; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_fp_single = ren2_uops_1_fp_single; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_xcpt_pf_if = ren2_uops_1_xcpt_pf_if; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_xcpt_ae_if = ren2_uops_1_xcpt_ae_if; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_xcpt_ma_if = ren2_uops_1_xcpt_ma_if; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_bp_debug_if = ren2_uops_1_bp_debug_if; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_bp_xcpt_if = ren2_uops_1_bp_xcpt_if; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_debug_fsrc = ren2_uops_1_debug_fsrc; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_1_debug_tsrc = ren2_uops_1_debug_tsrc; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_uopc = ren2_uops_2_uopc; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_inst = ren2_uops_2_inst; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_debug_inst = ren2_uops_2_debug_inst; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_is_rvc = ren2_uops_2_is_rvc; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_debug_pc = ren2_uops_2_debug_pc; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_iq_type = ren2_uops_2_iq_type; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_fu_code = ren2_uops_2_fu_code; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_ctrl_br_type = ren2_uops_2_ctrl_br_type; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_ctrl_op1_sel = ren2_uops_2_ctrl_op1_sel; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_ctrl_op2_sel = ren2_uops_2_ctrl_op2_sel; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_ctrl_imm_sel = ren2_uops_2_ctrl_imm_sel; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_ctrl_op_fcn = ren2_uops_2_ctrl_op_fcn; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_ctrl_fcn_dw = ren2_uops_2_ctrl_fcn_dw; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_ctrl_csr_cmd = ren2_uops_2_ctrl_csr_cmd; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_ctrl_is_load = ren2_uops_2_ctrl_is_load; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_ctrl_is_sta = ren2_uops_2_ctrl_is_sta; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_ctrl_is_std = ren2_uops_2_ctrl_is_std; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_iw_state = ren2_uops_2_iw_state; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_iw_p1_poisoned = ren2_uops_2_iw_p1_poisoned; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_iw_p2_poisoned = ren2_uops_2_iw_p2_poisoned; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_is_br = ren2_uops_2_is_br; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_is_jalr = ren2_uops_2_is_jalr; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_is_jal = ren2_uops_2_is_jal; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_is_sfb = ren2_uops_2_is_sfb; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_br_mask = ren2_uops_2_br_mask; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_br_tag = ren2_uops_2_br_tag; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_ftq_idx = ren2_uops_2_ftq_idx; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_edge_inst = ren2_uops_2_edge_inst; // 
@[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_pc_lob = ren2_uops_2_pc_lob; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_taken = ren2_uops_2_taken; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_imm_packed = ren2_uops_2_imm_packed; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_csr_addr = ren2_uops_2_csr_addr; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_rob_idx = ren2_uops_2_rob_idx; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_ldq_idx = ren2_uops_2_ldq_idx; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_stq_idx = ren2_uops_2_stq_idx; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_rxq_idx = ren2_uops_2_rxq_idx; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_pdst = ren2_uops_2_pdst; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_prs1 = ren2_uops_2_prs1; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_prs2 = ren2_uops_2_prs2; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_prs3 = ren2_uops_2_prs3; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_ppred = ren2_uops_2_ppred; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_prs1_busy = ren2_uops_2_prs1_busy; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_prs2_busy = ren2_uops_2_prs2_busy; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_prs3_busy = ren2_uops_2_prs3_busy; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_ppred_busy = ren2_uops_2_ppred_busy; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_stale_pdst = ren2_uops_2_stale_pdst; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_exception = ren2_uops_2_exception; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_exc_cause = ren2_uops_2_exc_cause; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_bypassable = ren2_uops_2_bypassable; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_mem_cmd = ren2_uops_2_mem_cmd; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_mem_size = ren2_uops_2_mem_size; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_mem_signed = ren2_uops_2_mem_signed; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_is_fence = ren2_uops_2_is_fence; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_is_fencei = ren2_uops_2_is_fencei; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_is_amo = ren2_uops_2_is_amo; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_uses_ldq = ren2_uops_2_uses_ldq; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_uses_stq = ren2_uops_2_uses_stq; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_is_sys_pc2epc = ren2_uops_2_is_sys_pc2epc; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_is_unique = ren2_uops_2_is_unique; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_flush_on_commit = ren2_uops_2_flush_on_commit; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_ldst_is_rs1 = ren2_uops_2_ldst_is_rs1; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_ldst = ren2_uops_2_ldst; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_lrs1 = ren2_uops_2_lrs1; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_lrs2 = ren2_uops_2_lrs2; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_lrs3 = ren2_uops_2_lrs3; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_ldst_val = 
ren2_uops_2_ldst_val; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_dst_rtype = ren2_uops_2_dst_rtype; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_lrs1_rtype = ren2_uops_2_lrs1_rtype; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_lrs2_rtype = ren2_uops_2_lrs2_rtype; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_frs3_en = ren2_uops_2_frs3_en; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_fp_val = ren2_uops_2_fp_val; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_fp_single = ren2_uops_2_fp_single; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_xcpt_pf_if = ren2_uops_2_xcpt_pf_if; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_xcpt_ae_if = ren2_uops_2_xcpt_ae_if; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_xcpt_ma_if = ren2_uops_2_xcpt_ma_if; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_bp_debug_if = ren2_uops_2_bp_debug_if; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_bp_xcpt_if = ren2_uops_2_bp_xcpt_if; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_debug_fsrc = ren2_uops_2_debug_fsrc; // @[rename-stage.scala:108:29, :356:7] assign io_ren2_uops_2_debug_tsrc = ren2_uops_2_debug_tsrc; // @[rename-stage.scala:108:29, :356:7] reg r_valid; // @[rename-stage.scala:121:27] assign ren2_valids_0 = r_valid; // @[rename-stage.scala:107:29, :121:27] reg [6:0] r_uop_uopc; // @[rename-stage.scala:122:23] assign ren2_uops_0_uopc = r_uop_uopc; // @[rename-stage.scala:108:29, :122:23] reg [31:0] r_uop_inst; // @[rename-stage.scala:122:23] assign ren2_uops_0_inst = r_uop_inst; // @[rename-stage.scala:108:29, :122:23] reg [31:0] r_uop_debug_inst; // @[rename-stage.scala:122:23] assign ren2_uops_0_debug_inst = r_uop_debug_inst; // @[rename-stage.scala:108:29, :122:23] reg r_uop_is_rvc; // @[rename-stage.scala:122:23] assign ren2_uops_0_is_rvc = r_uop_is_rvc; // @[rename-stage.scala:108:29, :122:23] reg [39:0] r_uop_debug_pc; // @[rename-stage.scala:122:23] assign ren2_uops_0_debug_pc = r_uop_debug_pc; // @[rename-stage.scala:108:29, :122:23] reg [2:0] r_uop_iq_type; // @[rename-stage.scala:122:23] assign ren2_uops_0_iq_type = r_uop_iq_type; // @[rename-stage.scala:108:29, :122:23] reg [9:0] r_uop_fu_code; // @[rename-stage.scala:122:23] assign ren2_uops_0_fu_code = r_uop_fu_code; // @[rename-stage.scala:108:29, :122:23] reg [3:0] r_uop_ctrl_br_type; // @[rename-stage.scala:122:23] assign ren2_uops_0_ctrl_br_type = r_uop_ctrl_br_type; // @[rename-stage.scala:108:29, :122:23] reg [1:0] r_uop_ctrl_op1_sel; // @[rename-stage.scala:122:23] assign ren2_uops_0_ctrl_op1_sel = r_uop_ctrl_op1_sel; // @[rename-stage.scala:108:29, :122:23] reg [2:0] r_uop_ctrl_op2_sel; // @[rename-stage.scala:122:23] assign ren2_uops_0_ctrl_op2_sel = r_uop_ctrl_op2_sel; // @[rename-stage.scala:108:29, :122:23] reg [2:0] r_uop_ctrl_imm_sel; // @[rename-stage.scala:122:23] assign ren2_uops_0_ctrl_imm_sel = r_uop_ctrl_imm_sel; // @[rename-stage.scala:108:29, :122:23] reg [4:0] r_uop_ctrl_op_fcn; // @[rename-stage.scala:122:23] assign ren2_uops_0_ctrl_op_fcn = r_uop_ctrl_op_fcn; // @[rename-stage.scala:108:29, :122:23] reg r_uop_ctrl_fcn_dw; // @[rename-stage.scala:122:23] assign ren2_uops_0_ctrl_fcn_dw = r_uop_ctrl_fcn_dw; // @[rename-stage.scala:108:29, :122:23] reg [2:0] r_uop_ctrl_csr_cmd; // @[rename-stage.scala:122:23] assign ren2_uops_0_ctrl_csr_cmd = r_uop_ctrl_csr_cmd; // @[rename-stage.scala:108:29, :122:23] reg r_uop_ctrl_is_load; // 
@[rename-stage.scala:122:23] assign ren2_uops_0_ctrl_is_load = r_uop_ctrl_is_load; // @[rename-stage.scala:108:29, :122:23] reg r_uop_ctrl_is_sta; // @[rename-stage.scala:122:23] assign ren2_uops_0_ctrl_is_sta = r_uop_ctrl_is_sta; // @[rename-stage.scala:108:29, :122:23] reg r_uop_ctrl_is_std; // @[rename-stage.scala:122:23] assign ren2_uops_0_ctrl_is_std = r_uop_ctrl_is_std; // @[rename-stage.scala:108:29, :122:23] reg [1:0] r_uop_iw_state; // @[rename-stage.scala:122:23] assign ren2_uops_0_iw_state = r_uop_iw_state; // @[rename-stage.scala:108:29, :122:23] reg r_uop_iw_p1_poisoned; // @[rename-stage.scala:122:23] assign ren2_uops_0_iw_p1_poisoned = r_uop_iw_p1_poisoned; // @[rename-stage.scala:108:29, :122:23] reg r_uop_iw_p2_poisoned; // @[rename-stage.scala:122:23] assign ren2_uops_0_iw_p2_poisoned = r_uop_iw_p2_poisoned; // @[rename-stage.scala:108:29, :122:23] reg r_uop_is_br; // @[rename-stage.scala:122:23] assign ren2_uops_0_is_br = r_uop_is_br; // @[rename-stage.scala:108:29, :122:23] reg r_uop_is_jalr; // @[rename-stage.scala:122:23] assign ren2_uops_0_is_jalr = r_uop_is_jalr; // @[rename-stage.scala:108:29, :122:23] reg r_uop_is_jal; // @[rename-stage.scala:122:23] assign ren2_uops_0_is_jal = r_uop_is_jal; // @[rename-stage.scala:108:29, :122:23] reg r_uop_is_sfb; // @[rename-stage.scala:122:23] assign ren2_uops_0_is_sfb = r_uop_is_sfb; // @[rename-stage.scala:108:29, :122:23] reg [15:0] r_uop_br_mask; // @[rename-stage.scala:122:23] assign ren2_uops_0_br_mask = r_uop_br_mask; // @[rename-stage.scala:108:29, :122:23] reg [3:0] r_uop_br_tag; // @[rename-stage.scala:122:23] assign ren2_uops_0_br_tag = r_uop_br_tag; // @[rename-stage.scala:108:29, :122:23] reg [4:0] r_uop_ftq_idx; // @[rename-stage.scala:122:23] assign ren2_uops_0_ftq_idx = r_uop_ftq_idx; // @[rename-stage.scala:108:29, :122:23] reg r_uop_edge_inst; // @[rename-stage.scala:122:23] assign ren2_uops_0_edge_inst = r_uop_edge_inst; // @[rename-stage.scala:108:29, :122:23] reg [5:0] r_uop_pc_lob; // @[rename-stage.scala:122:23] assign ren2_uops_0_pc_lob = r_uop_pc_lob; // @[rename-stage.scala:108:29, :122:23] reg r_uop_taken; // @[rename-stage.scala:122:23] assign ren2_uops_0_taken = r_uop_taken; // @[rename-stage.scala:108:29, :122:23] reg [19:0] r_uop_imm_packed; // @[rename-stage.scala:122:23] assign ren2_uops_0_imm_packed = r_uop_imm_packed; // @[rename-stage.scala:108:29, :122:23] reg [11:0] r_uop_csr_addr; // @[rename-stage.scala:122:23] assign ren2_uops_0_csr_addr = r_uop_csr_addr; // @[rename-stage.scala:108:29, :122:23] reg [6:0] r_uop_rob_idx; // @[rename-stage.scala:122:23] assign ren2_uops_0_rob_idx = r_uop_rob_idx; // @[rename-stage.scala:108:29, :122:23] reg [4:0] r_uop_ldq_idx; // @[rename-stage.scala:122:23] assign ren2_uops_0_ldq_idx = r_uop_ldq_idx; // @[rename-stage.scala:108:29, :122:23] reg [4:0] r_uop_stq_idx; // @[rename-stage.scala:122:23] assign ren2_uops_0_stq_idx = r_uop_stq_idx; // @[rename-stage.scala:108:29, :122:23] reg [1:0] r_uop_rxq_idx; // @[rename-stage.scala:122:23] assign ren2_uops_0_rxq_idx = r_uop_rxq_idx; // @[rename-stage.scala:108:29, :122:23] reg [6:0] r_uop_pdst; // @[rename-stage.scala:122:23] assign ren2_uops_0_pdst = r_uop_pdst; // @[rename-stage.scala:108:29, :122:23] reg [6:0] r_uop_prs1; // @[rename-stage.scala:122:23] assign ren2_uops_0_prs1 = r_uop_prs1; // @[rename-stage.scala:108:29, :122:23] reg [6:0] r_uop_prs2; // @[rename-stage.scala:122:23] assign ren2_uops_0_prs2 = r_uop_prs2; // @[rename-stage.scala:108:29, :122:23] reg [6:0] r_uop_prs3; // 
@[rename-stage.scala:122:23] assign ren2_uops_0_prs3 = r_uop_prs3; // @[rename-stage.scala:108:29, :122:23] reg [4:0] r_uop_ppred; // @[rename-stage.scala:122:23] assign ren2_uops_0_ppred = r_uop_ppred; // @[rename-stage.scala:108:29, :122:23] reg r_uop_prs1_busy; // @[rename-stage.scala:122:23] assign ren2_uops_0_prs1_busy = r_uop_prs1_busy; // @[rename-stage.scala:108:29, :122:23] reg r_uop_prs2_busy; // @[rename-stage.scala:122:23] assign ren2_uops_0_prs2_busy = r_uop_prs2_busy; // @[rename-stage.scala:108:29, :122:23] reg r_uop_prs3_busy; // @[rename-stage.scala:122:23] assign ren2_uops_0_prs3_busy = r_uop_prs3_busy; // @[rename-stage.scala:108:29, :122:23] reg r_uop_ppred_busy; // @[rename-stage.scala:122:23] assign ren2_uops_0_ppred_busy = r_uop_ppred_busy; // @[rename-stage.scala:108:29, :122:23] reg [6:0] r_uop_stale_pdst; // @[rename-stage.scala:122:23] assign ren2_uops_0_stale_pdst = r_uop_stale_pdst; // @[rename-stage.scala:108:29, :122:23] reg r_uop_exception; // @[rename-stage.scala:122:23] assign ren2_uops_0_exception = r_uop_exception; // @[rename-stage.scala:108:29, :122:23] reg [63:0] r_uop_exc_cause; // @[rename-stage.scala:122:23] assign ren2_uops_0_exc_cause = r_uop_exc_cause; // @[rename-stage.scala:108:29, :122:23] reg r_uop_bypassable; // @[rename-stage.scala:122:23] assign ren2_uops_0_bypassable = r_uop_bypassable; // @[rename-stage.scala:108:29, :122:23] reg [4:0] r_uop_mem_cmd; // @[rename-stage.scala:122:23] assign ren2_uops_0_mem_cmd = r_uop_mem_cmd; // @[rename-stage.scala:108:29, :122:23] reg [1:0] r_uop_mem_size; // @[rename-stage.scala:122:23] assign ren2_uops_0_mem_size = r_uop_mem_size; // @[rename-stage.scala:108:29, :122:23] reg r_uop_mem_signed; // @[rename-stage.scala:122:23] assign ren2_uops_0_mem_signed = r_uop_mem_signed; // @[rename-stage.scala:108:29, :122:23] reg r_uop_is_fence; // @[rename-stage.scala:122:23] assign ren2_uops_0_is_fence = r_uop_is_fence; // @[rename-stage.scala:108:29, :122:23] reg r_uop_is_fencei; // @[rename-stage.scala:122:23] assign ren2_uops_0_is_fencei = r_uop_is_fencei; // @[rename-stage.scala:108:29, :122:23] reg r_uop_is_amo; // @[rename-stage.scala:122:23] assign ren2_uops_0_is_amo = r_uop_is_amo; // @[rename-stage.scala:108:29, :122:23] reg r_uop_uses_ldq; // @[rename-stage.scala:122:23] assign ren2_uops_0_uses_ldq = r_uop_uses_ldq; // @[rename-stage.scala:108:29, :122:23] reg r_uop_uses_stq; // @[rename-stage.scala:122:23] assign ren2_uops_0_uses_stq = r_uop_uses_stq; // @[rename-stage.scala:108:29, :122:23] reg r_uop_is_sys_pc2epc; // @[rename-stage.scala:122:23] assign ren2_uops_0_is_sys_pc2epc = r_uop_is_sys_pc2epc; // @[rename-stage.scala:108:29, :122:23] reg r_uop_is_unique; // @[rename-stage.scala:122:23] assign ren2_uops_0_is_unique = r_uop_is_unique; // @[rename-stage.scala:108:29, :122:23] reg r_uop_flush_on_commit; // @[rename-stage.scala:122:23] assign ren2_uops_0_flush_on_commit = r_uop_flush_on_commit; // @[rename-stage.scala:108:29, :122:23] reg r_uop_ldst_is_rs1; // @[rename-stage.scala:122:23] assign ren2_uops_0_ldst_is_rs1 = r_uop_ldst_is_rs1; // @[rename-stage.scala:108:29, :122:23] reg [5:0] r_uop_ldst; // @[rename-stage.scala:122:23] assign ren2_uops_0_ldst = r_uop_ldst; // @[rename-stage.scala:108:29, :122:23] reg [5:0] r_uop_lrs1; // @[rename-stage.scala:122:23] assign ren2_uops_0_lrs1 = r_uop_lrs1; // @[rename-stage.scala:108:29, :122:23] reg [5:0] r_uop_lrs2; // @[rename-stage.scala:122:23] assign ren2_uops_0_lrs2 = r_uop_lrs2; // @[rename-stage.scala:108:29, :122:23] reg [5:0] r_uop_lrs3; // 
@[rename-stage.scala:122:23] assign ren2_uops_0_lrs3 = r_uop_lrs3; // @[rename-stage.scala:108:29, :122:23] reg r_uop_ldst_val; // @[rename-stage.scala:122:23] assign ren2_uops_0_ldst_val = r_uop_ldst_val; // @[rename-stage.scala:108:29, :122:23] reg [1:0] r_uop_dst_rtype; // @[rename-stage.scala:122:23] assign ren2_uops_0_dst_rtype = r_uop_dst_rtype; // @[rename-stage.scala:108:29, :122:23] reg [1:0] r_uop_lrs1_rtype; // @[rename-stage.scala:122:23] assign ren2_uops_0_lrs1_rtype = r_uop_lrs1_rtype; // @[rename-stage.scala:108:29, :122:23] reg [1:0] r_uop_lrs2_rtype; // @[rename-stage.scala:122:23] assign ren2_uops_0_lrs2_rtype = r_uop_lrs2_rtype; // @[rename-stage.scala:108:29, :122:23] reg r_uop_frs3_en; // @[rename-stage.scala:122:23] assign ren2_uops_0_frs3_en = r_uop_frs3_en; // @[rename-stage.scala:108:29, :122:23] reg r_uop_fp_val; // @[rename-stage.scala:122:23] assign ren2_uops_0_fp_val = r_uop_fp_val; // @[rename-stage.scala:108:29, :122:23] reg r_uop_fp_single; // @[rename-stage.scala:122:23] assign ren2_uops_0_fp_single = r_uop_fp_single; // @[rename-stage.scala:108:29, :122:23] reg r_uop_xcpt_pf_if; // @[rename-stage.scala:122:23] assign ren2_uops_0_xcpt_pf_if = r_uop_xcpt_pf_if; // @[rename-stage.scala:108:29, :122:23] reg r_uop_xcpt_ae_if; // @[rename-stage.scala:122:23] assign ren2_uops_0_xcpt_ae_if = r_uop_xcpt_ae_if; // @[rename-stage.scala:108:29, :122:23] reg r_uop_xcpt_ma_if; // @[rename-stage.scala:122:23] assign ren2_uops_0_xcpt_ma_if = r_uop_xcpt_ma_if; // @[rename-stage.scala:108:29, :122:23] reg r_uop_bp_debug_if; // @[rename-stage.scala:122:23] assign ren2_uops_0_bp_debug_if = r_uop_bp_debug_if; // @[rename-stage.scala:108:29, :122:23] reg r_uop_bp_xcpt_if; // @[rename-stage.scala:122:23] assign ren2_uops_0_bp_xcpt_if = r_uop_bp_xcpt_if; // @[rename-stage.scala:108:29, :122:23] reg [1:0] r_uop_debug_fsrc; // @[rename-stage.scala:122:23] assign ren2_uops_0_debug_fsrc = r_uop_debug_fsrc; // @[rename-stage.scala:108:29, :122:23] reg [1:0] r_uop_debug_tsrc; // @[rename-stage.scala:122:23] assign ren2_uops_0_debug_tsrc = r_uop_debug_tsrc; // @[rename-stage.scala:108:29, :122:23] wire [6:0] r_uop_newuop_uopc = next_uop_uopc; // @[util.scala:73:26] wire [31:0] r_uop_newuop_inst = next_uop_inst; // @[util.scala:73:26] wire [31:0] r_uop_newuop_debug_inst = next_uop_debug_inst; // @[util.scala:73:26] wire r_uop_newuop_is_rvc = next_uop_is_rvc; // @[util.scala:73:26] wire [39:0] r_uop_newuop_debug_pc = next_uop_debug_pc; // @[util.scala:73:26] wire [2:0] r_uop_newuop_iq_type = next_uop_iq_type; // @[util.scala:73:26] wire [9:0] r_uop_newuop_fu_code = next_uop_fu_code; // @[util.scala:73:26] wire [3:0] r_uop_newuop_ctrl_br_type = next_uop_ctrl_br_type; // @[util.scala:73:26] wire [1:0] r_uop_newuop_ctrl_op1_sel = next_uop_ctrl_op1_sel; // @[util.scala:73:26] wire [2:0] r_uop_newuop_ctrl_op2_sel = next_uop_ctrl_op2_sel; // @[util.scala:73:26] wire [2:0] r_uop_newuop_ctrl_imm_sel = next_uop_ctrl_imm_sel; // @[util.scala:73:26] wire [4:0] r_uop_newuop_ctrl_op_fcn = next_uop_ctrl_op_fcn; // @[util.scala:73:26] wire r_uop_newuop_ctrl_fcn_dw = next_uop_ctrl_fcn_dw; // @[util.scala:73:26] wire [2:0] r_uop_newuop_ctrl_csr_cmd = next_uop_ctrl_csr_cmd; // @[util.scala:73:26] wire r_uop_newuop_ctrl_is_load = next_uop_ctrl_is_load; // @[util.scala:73:26] wire r_uop_newuop_ctrl_is_sta = next_uop_ctrl_is_sta; // @[util.scala:73:26] wire r_uop_newuop_ctrl_is_std = next_uop_ctrl_is_std; // @[util.scala:73:26] wire [1:0] r_uop_newuop_iw_state = next_uop_iw_state; // @[util.scala:73:26] wire 
r_uop_newuop_iw_p1_poisoned = next_uop_iw_p1_poisoned; // @[util.scala:73:26] wire r_uop_newuop_iw_p2_poisoned = next_uop_iw_p2_poisoned; // @[util.scala:73:26] wire r_uop_newuop_is_br = next_uop_is_br; // @[util.scala:73:26] wire r_uop_newuop_is_jalr = next_uop_is_jalr; // @[util.scala:73:26] wire r_uop_newuop_is_jal = next_uop_is_jal; // @[util.scala:73:26] wire r_uop_newuop_is_sfb = next_uop_is_sfb; // @[util.scala:73:26] wire [3:0] r_uop_newuop_br_tag = next_uop_br_tag; // @[util.scala:73:26] wire [4:0] r_uop_newuop_ftq_idx = next_uop_ftq_idx; // @[util.scala:73:26] wire r_uop_newuop_edge_inst = next_uop_edge_inst; // @[util.scala:73:26] wire [5:0] r_uop_newuop_pc_lob = next_uop_pc_lob; // @[util.scala:73:26] wire r_uop_newuop_taken = next_uop_taken; // @[util.scala:73:26] wire [19:0] r_uop_newuop_imm_packed = next_uop_imm_packed; // @[util.scala:73:26] wire [11:0] r_uop_newuop_csr_addr = next_uop_csr_addr; // @[util.scala:73:26] wire [6:0] r_uop_newuop_rob_idx = next_uop_rob_idx; // @[util.scala:73:26] wire [4:0] r_uop_newuop_ldq_idx = next_uop_ldq_idx; // @[util.scala:73:26] wire [4:0] r_uop_newuop_stq_idx = next_uop_stq_idx; // @[util.scala:73:26] wire [1:0] r_uop_newuop_rxq_idx = next_uop_rxq_idx; // @[util.scala:73:26] wire [6:0] r_uop_newuop_pdst = next_uop_pdst; // @[util.scala:73:26] wire [6:0] r_uop_newuop_prs1 = next_uop_prs1; // @[util.scala:73:26] wire [6:0] r_uop_newuop_prs2 = next_uop_prs2; // @[util.scala:73:26] wire [6:0] r_uop_newuop_prs3 = next_uop_prs3; // @[util.scala:73:26] wire [4:0] r_uop_newuop_ppred = next_uop_ppred; // @[util.scala:73:26] wire r_uop_newuop_prs1_busy = next_uop_prs1_busy; // @[util.scala:73:26] wire r_uop_newuop_prs2_busy = next_uop_prs2_busy; // @[util.scala:73:26] wire r_uop_newuop_prs3_busy = next_uop_prs3_busy; // @[util.scala:73:26] wire r_uop_newuop_ppred_busy = next_uop_ppred_busy; // @[util.scala:73:26] wire [6:0] r_uop_newuop_stale_pdst = next_uop_stale_pdst; // @[util.scala:73:26] wire r_uop_newuop_exception = next_uop_exception; // @[util.scala:73:26] wire [63:0] r_uop_newuop_exc_cause = next_uop_exc_cause; // @[util.scala:73:26] wire r_uop_newuop_bypassable = next_uop_bypassable; // @[util.scala:73:26] wire [4:0] r_uop_newuop_mem_cmd = next_uop_mem_cmd; // @[util.scala:73:26] wire [1:0] r_uop_newuop_mem_size = next_uop_mem_size; // @[util.scala:73:26] wire r_uop_newuop_mem_signed = next_uop_mem_signed; // @[util.scala:73:26] wire r_uop_newuop_is_fence = next_uop_is_fence; // @[util.scala:73:26] wire r_uop_newuop_is_fencei = next_uop_is_fencei; // @[util.scala:73:26] wire r_uop_newuop_is_amo = next_uop_is_amo; // @[util.scala:73:26] wire r_uop_newuop_uses_ldq = next_uop_uses_ldq; // @[util.scala:73:26] wire r_uop_newuop_uses_stq = next_uop_uses_stq; // @[util.scala:73:26] wire r_uop_newuop_is_sys_pc2epc = next_uop_is_sys_pc2epc; // @[util.scala:73:26] wire r_uop_newuop_is_unique = next_uop_is_unique; // @[util.scala:73:26] wire r_uop_newuop_flush_on_commit = next_uop_flush_on_commit; // @[util.scala:73:26] wire r_uop_newuop_ldst_is_rs1 = next_uop_ldst_is_rs1; // @[util.scala:73:26] wire [5:0] r_uop_newuop_ldst = next_uop_ldst; // @[util.scala:73:26] wire [5:0] r_uop_newuop_lrs1 = next_uop_lrs1; // @[util.scala:73:26] wire [5:0] r_uop_newuop_lrs2 = next_uop_lrs2; // @[util.scala:73:26] wire [5:0] r_uop_newuop_lrs3 = next_uop_lrs3; // @[util.scala:73:26] wire r_uop_newuop_ldst_val = next_uop_ldst_val; // @[util.scala:73:26] wire [1:0] r_uop_newuop_dst_rtype = next_uop_dst_rtype; // @[util.scala:73:26] wire [1:0] r_uop_newuop_lrs1_rtype 
= next_uop_lrs1_rtype; // @[util.scala:73:26] wire [1:0] r_uop_newuop_lrs2_rtype = next_uop_lrs2_rtype; // @[util.scala:73:26] wire r_uop_newuop_frs3_en = next_uop_frs3_en; // @[util.scala:73:26] wire r_uop_newuop_fp_val = next_uop_fp_val; // @[util.scala:73:26] wire r_uop_newuop_fp_single = next_uop_fp_single; // @[util.scala:73:26] wire r_uop_newuop_xcpt_pf_if = next_uop_xcpt_pf_if; // @[util.scala:73:26] wire r_uop_newuop_xcpt_ae_if = next_uop_xcpt_ae_if; // @[util.scala:73:26] wire r_uop_newuop_xcpt_ma_if = next_uop_xcpt_ma_if; // @[util.scala:73:26] wire r_uop_newuop_bp_debug_if = next_uop_bp_debug_if; // @[util.scala:73:26] wire r_uop_newuop_bp_xcpt_if = next_uop_bp_xcpt_if; // @[util.scala:73:26] wire [1:0] r_uop_newuop_debug_fsrc = next_uop_debug_fsrc; // @[util.scala:73:26] wire [1:0] r_uop_newuop_debug_tsrc = next_uop_debug_tsrc; // @[util.scala:73:26] wire [15:0] next_uop_br_mask; // @[rename-stage.scala:123:24] wire _r_valid_T = ~io_dis_fire_0_0; // @[rename-stage.scala:133:29, :356:7] wire _r_valid_T_1 = r_valid & _r_valid_T; // @[rename-stage.scala:121:27, :133:{26,29}] wire _GEN = io_kill_0 | ~io_dis_ready_0; // @[rename-stage.scala:125:14, :127:20, :129:30, :356:7] assign next_uop_uopc = _GEN ? r_uop_uopc : ren1_uops_0_uopc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_inst = _GEN ? r_uop_inst : ren1_uops_0_inst; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_debug_inst = _GEN ? r_uop_debug_inst : ren1_uops_0_debug_inst; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_is_rvc = _GEN ? r_uop_is_rvc : ren1_uops_0_is_rvc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_debug_pc = _GEN ? r_uop_debug_pc : ren1_uops_0_debug_pc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_iq_type = _GEN ? r_uop_iq_type : ren1_uops_0_iq_type; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_fu_code = _GEN ? r_uop_fu_code : ren1_uops_0_fu_code; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_ctrl_br_type = _GEN ? r_uop_ctrl_br_type : 4'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_ctrl_op1_sel = _GEN ? r_uop_ctrl_op1_sel : 2'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_ctrl_op2_sel = _GEN ? r_uop_ctrl_op2_sel : 3'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_ctrl_imm_sel = _GEN ? r_uop_ctrl_imm_sel : 3'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_ctrl_op_fcn = _GEN ? r_uop_ctrl_op_fcn : 5'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_ctrl_fcn_dw = _GEN & r_uop_ctrl_fcn_dw; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_ctrl_csr_cmd = _GEN ? r_uop_ctrl_csr_cmd : 3'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_ctrl_is_load = _GEN & r_uop_ctrl_is_load; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_ctrl_is_sta = _GEN & r_uop_ctrl_is_sta; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_ctrl_is_std = _GEN & r_uop_ctrl_is_std; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_iw_state = _GEN ? 
r_uop_iw_state : 2'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_iw_p1_poisoned = _GEN & r_uop_iw_p1_poisoned; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_iw_p2_poisoned = _GEN & r_uop_iw_p2_poisoned; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_is_br = _GEN ? r_uop_is_br : ren1_uops_0_is_br; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_is_jalr = _GEN ? r_uop_is_jalr : ren1_uops_0_is_jalr; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_is_jal = _GEN ? r_uop_is_jal : ren1_uops_0_is_jal; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_is_sfb = _GEN ? r_uop_is_sfb : ren1_uops_0_is_sfb; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_br_mask = _GEN ? r_uop_br_mask : ren1_uops_0_br_mask; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_br_tag = _GEN ? r_uop_br_tag : ren1_uops_0_br_tag; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_ftq_idx = _GEN ? r_uop_ftq_idx : ren1_uops_0_ftq_idx; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_edge_inst = _GEN ? r_uop_edge_inst : ren1_uops_0_edge_inst; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_pc_lob = _GEN ? r_uop_pc_lob : ren1_uops_0_pc_lob; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_taken = _GEN ? r_uop_taken : ren1_uops_0_taken; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_imm_packed = _GEN ? r_uop_imm_packed : ren1_uops_0_imm_packed; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_csr_addr = _GEN ? r_uop_csr_addr : 12'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_rob_idx = _GEN ? r_uop_rob_idx : 7'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_ldq_idx = _GEN ? r_uop_ldq_idx : 5'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_stq_idx = _GEN ? r_uop_stq_idx : 5'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_rxq_idx = _GEN ? r_uop_rxq_idx : 2'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_pdst = _GEN ? r_uop_pdst : 7'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_prs1 = _GEN ? r_uop_prs1 : 7'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_prs2 = _GEN ? r_uop_prs2 : 7'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_prs3 = _GEN ? r_uop_prs3 : 7'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_ppred = _GEN ? 
r_uop_ppred : 5'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_prs1_busy = _GEN & r_uop_prs1_busy; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_prs2_busy = _GEN & r_uop_prs2_busy; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_prs3_busy = _GEN & r_uop_prs3_busy; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_ppred_busy = _GEN & r_uop_ppred_busy; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_stale_pdst = _GEN ? r_uop_stale_pdst : 7'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_exception = _GEN ? r_uop_exception : ren1_uops_0_exception; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_exc_cause = _GEN ? r_uop_exc_cause : ren1_uops_0_exc_cause; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_bypassable = _GEN ? r_uop_bypassable : ren1_uops_0_bypassable; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_mem_cmd = _GEN ? r_uop_mem_cmd : ren1_uops_0_mem_cmd; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_mem_size = _GEN ? r_uop_mem_size : ren1_uops_0_mem_size; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_mem_signed = _GEN ? r_uop_mem_signed : ren1_uops_0_mem_signed; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_is_fence = _GEN ? r_uop_is_fence : ren1_uops_0_is_fence; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_is_fencei = _GEN ? r_uop_is_fencei : ren1_uops_0_is_fencei; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_is_amo = _GEN ? r_uop_is_amo : ren1_uops_0_is_amo; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_uses_ldq = _GEN ? r_uop_uses_ldq : ren1_uops_0_uses_ldq; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_uses_stq = _GEN ? r_uop_uses_stq : ren1_uops_0_uses_stq; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_is_sys_pc2epc = _GEN ? r_uop_is_sys_pc2epc : ren1_uops_0_is_sys_pc2epc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_is_unique = _GEN ? r_uop_is_unique : ren1_uops_0_is_unique; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_flush_on_commit = _GEN ? r_uop_flush_on_commit : ren1_uops_0_flush_on_commit; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_ldst_is_rs1 = _GEN & r_uop_ldst_is_rs1; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_ldst = _GEN ? r_uop_ldst : ren1_uops_0_ldst; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_lrs1 = _GEN ? r_uop_lrs1 : ren1_uops_0_lrs1; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_lrs2 = _GEN ? r_uop_lrs2 : ren1_uops_0_lrs2; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_lrs3 = _GEN ? 
r_uop_lrs3 : ren1_uops_0_lrs3; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_ldst_val = _GEN ? r_uop_ldst_val : ren1_uops_0_ldst_val; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_dst_rtype = _GEN ? r_uop_dst_rtype : ren1_uops_0_dst_rtype; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_lrs1_rtype = _GEN ? r_uop_lrs1_rtype : ren1_uops_0_lrs1_rtype; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_lrs2_rtype = _GEN ? r_uop_lrs2_rtype : ren1_uops_0_lrs2_rtype; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_frs3_en = _GEN ? r_uop_frs3_en : ren1_uops_0_frs3_en; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_fp_val = _GEN ? r_uop_fp_val : ren1_uops_0_fp_val; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_fp_single = _GEN ? r_uop_fp_single : ren1_uops_0_fp_single; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_xcpt_pf_if = _GEN ? r_uop_xcpt_pf_if : ren1_uops_0_xcpt_pf_if; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_xcpt_ae_if = _GEN ? r_uop_xcpt_ae_if : ren1_uops_0_xcpt_ae_if; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_xcpt_ma_if = _GEN & r_uop_xcpt_ma_if; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_bp_debug_if = _GEN ? r_uop_bp_debug_if : ren1_uops_0_bp_debug_if; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_bp_xcpt_if = _GEN ? r_uop_bp_xcpt_if : ren1_uops_0_bp_xcpt_if; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_debug_fsrc = _GEN ? r_uop_debug_fsrc : ren1_uops_0_debug_fsrc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_debug_tsrc = _GEN ? 
r_uop_debug_tsrc : 2'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] wire [15:0] _r_uop_newuop_br_mask_T_1; // @[util.scala:74:35] wire [15:0] r_uop_newuop_br_mask; // @[util.scala:73:26] wire [15:0] _r_uop_newuop_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:74:37] assign _r_uop_newuop_br_mask_T_1 = next_uop_br_mask & _r_uop_newuop_br_mask_T; // @[util.scala:74:{35,37}] assign r_uop_newuop_br_mask = _r_uop_newuop_br_mask_T_1; // @[util.scala:73:26, :74:35] reg r_valid_1; // @[rename-stage.scala:121:27] assign ren2_valids_1 = r_valid_1; // @[rename-stage.scala:107:29, :121:27] reg [6:0] r_uop_1_uopc; // @[rename-stage.scala:122:23] assign ren2_uops_1_uopc = r_uop_1_uopc; // @[rename-stage.scala:108:29, :122:23] reg [31:0] r_uop_1_inst; // @[rename-stage.scala:122:23] assign ren2_uops_1_inst = r_uop_1_inst; // @[rename-stage.scala:108:29, :122:23] reg [31:0] r_uop_1_debug_inst; // @[rename-stage.scala:122:23] assign ren2_uops_1_debug_inst = r_uop_1_debug_inst; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_is_rvc; // @[rename-stage.scala:122:23] assign ren2_uops_1_is_rvc = r_uop_1_is_rvc; // @[rename-stage.scala:108:29, :122:23] reg [39:0] r_uop_1_debug_pc; // @[rename-stage.scala:122:23] assign ren2_uops_1_debug_pc = r_uop_1_debug_pc; // @[rename-stage.scala:108:29, :122:23] reg [2:0] r_uop_1_iq_type; // @[rename-stage.scala:122:23] assign ren2_uops_1_iq_type = r_uop_1_iq_type; // @[rename-stage.scala:108:29, :122:23] reg [9:0] r_uop_1_fu_code; // @[rename-stage.scala:122:23] assign ren2_uops_1_fu_code = r_uop_1_fu_code; // @[rename-stage.scala:108:29, :122:23] reg [3:0] r_uop_1_ctrl_br_type; // @[rename-stage.scala:122:23] assign ren2_uops_1_ctrl_br_type = r_uop_1_ctrl_br_type; // @[rename-stage.scala:108:29, :122:23] reg [1:0] r_uop_1_ctrl_op1_sel; // @[rename-stage.scala:122:23] assign ren2_uops_1_ctrl_op1_sel = r_uop_1_ctrl_op1_sel; // @[rename-stage.scala:108:29, :122:23] reg [2:0] r_uop_1_ctrl_op2_sel; // @[rename-stage.scala:122:23] assign ren2_uops_1_ctrl_op2_sel = r_uop_1_ctrl_op2_sel; // @[rename-stage.scala:108:29, :122:23] reg [2:0] r_uop_1_ctrl_imm_sel; // @[rename-stage.scala:122:23] assign ren2_uops_1_ctrl_imm_sel = r_uop_1_ctrl_imm_sel; // @[rename-stage.scala:108:29, :122:23] reg [4:0] r_uop_1_ctrl_op_fcn; // @[rename-stage.scala:122:23] assign ren2_uops_1_ctrl_op_fcn = r_uop_1_ctrl_op_fcn; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_ctrl_fcn_dw; // @[rename-stage.scala:122:23] assign ren2_uops_1_ctrl_fcn_dw = r_uop_1_ctrl_fcn_dw; // @[rename-stage.scala:108:29, :122:23] reg [2:0] r_uop_1_ctrl_csr_cmd; // @[rename-stage.scala:122:23] assign ren2_uops_1_ctrl_csr_cmd = r_uop_1_ctrl_csr_cmd; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_ctrl_is_load; // @[rename-stage.scala:122:23] assign ren2_uops_1_ctrl_is_load = r_uop_1_ctrl_is_load; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_ctrl_is_sta; // @[rename-stage.scala:122:23] assign ren2_uops_1_ctrl_is_sta = r_uop_1_ctrl_is_sta; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_ctrl_is_std; // @[rename-stage.scala:122:23] assign ren2_uops_1_ctrl_is_std = r_uop_1_ctrl_is_std; // @[rename-stage.scala:108:29, :122:23] reg [1:0] r_uop_1_iw_state; // @[rename-stage.scala:122:23] assign ren2_uops_1_iw_state = r_uop_1_iw_state; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_iw_p1_poisoned; // @[rename-stage.scala:122:23] assign ren2_uops_1_iw_p1_poisoned = r_uop_1_iw_p1_poisoned; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_iw_p2_poisoned; // 
@[rename-stage.scala:122:23] assign ren2_uops_1_iw_p2_poisoned = r_uop_1_iw_p2_poisoned; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_is_br; // @[rename-stage.scala:122:23] assign ren2_uops_1_is_br = r_uop_1_is_br; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_is_jalr; // @[rename-stage.scala:122:23] assign ren2_uops_1_is_jalr = r_uop_1_is_jalr; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_is_jal; // @[rename-stage.scala:122:23] assign ren2_uops_1_is_jal = r_uop_1_is_jal; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_is_sfb; // @[rename-stage.scala:122:23] assign ren2_uops_1_is_sfb = r_uop_1_is_sfb; // @[rename-stage.scala:108:29, :122:23] reg [15:0] r_uop_1_br_mask; // @[rename-stage.scala:122:23] assign ren2_uops_1_br_mask = r_uop_1_br_mask; // @[rename-stage.scala:108:29, :122:23] reg [3:0] r_uop_1_br_tag; // @[rename-stage.scala:122:23] assign ren2_uops_1_br_tag = r_uop_1_br_tag; // @[rename-stage.scala:108:29, :122:23] reg [4:0] r_uop_1_ftq_idx; // @[rename-stage.scala:122:23] assign ren2_uops_1_ftq_idx = r_uop_1_ftq_idx; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_edge_inst; // @[rename-stage.scala:122:23] assign ren2_uops_1_edge_inst = r_uop_1_edge_inst; // @[rename-stage.scala:108:29, :122:23] reg [5:0] r_uop_1_pc_lob; // @[rename-stage.scala:122:23] assign ren2_uops_1_pc_lob = r_uop_1_pc_lob; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_taken; // @[rename-stage.scala:122:23] assign ren2_uops_1_taken = r_uop_1_taken; // @[rename-stage.scala:108:29, :122:23] reg [19:0] r_uop_1_imm_packed; // @[rename-stage.scala:122:23] assign ren2_uops_1_imm_packed = r_uop_1_imm_packed; // @[rename-stage.scala:108:29, :122:23] reg [11:0] r_uop_1_csr_addr; // @[rename-stage.scala:122:23] assign ren2_uops_1_csr_addr = r_uop_1_csr_addr; // @[rename-stage.scala:108:29, :122:23] reg [6:0] r_uop_1_rob_idx; // @[rename-stage.scala:122:23] assign ren2_uops_1_rob_idx = r_uop_1_rob_idx; // @[rename-stage.scala:108:29, :122:23] reg [4:0] r_uop_1_ldq_idx; // @[rename-stage.scala:122:23] assign ren2_uops_1_ldq_idx = r_uop_1_ldq_idx; // @[rename-stage.scala:108:29, :122:23] reg [4:0] r_uop_1_stq_idx; // @[rename-stage.scala:122:23] assign ren2_uops_1_stq_idx = r_uop_1_stq_idx; // @[rename-stage.scala:108:29, :122:23] reg [1:0] r_uop_1_rxq_idx; // @[rename-stage.scala:122:23] assign ren2_uops_1_rxq_idx = r_uop_1_rxq_idx; // @[rename-stage.scala:108:29, :122:23] reg [6:0] r_uop_1_pdst; // @[rename-stage.scala:122:23] assign ren2_uops_1_pdst = r_uop_1_pdst; // @[rename-stage.scala:108:29, :122:23] reg [6:0] r_uop_1_prs1; // @[rename-stage.scala:122:23] assign ren2_uops_1_prs1 = r_uop_1_prs1; // @[rename-stage.scala:108:29, :122:23] reg [6:0] r_uop_1_prs2; // @[rename-stage.scala:122:23] assign ren2_uops_1_prs2 = r_uop_1_prs2; // @[rename-stage.scala:108:29, :122:23] reg [6:0] r_uop_1_prs3; // @[rename-stage.scala:122:23] assign ren2_uops_1_prs3 = r_uop_1_prs3; // @[rename-stage.scala:108:29, :122:23] reg [4:0] r_uop_1_ppred; // @[rename-stage.scala:122:23] assign ren2_uops_1_ppred = r_uop_1_ppred; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_prs1_busy; // @[rename-stage.scala:122:23] assign ren2_uops_1_prs1_busy = r_uop_1_prs1_busy; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_prs2_busy; // @[rename-stage.scala:122:23] assign ren2_uops_1_prs2_busy = r_uop_1_prs2_busy; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_prs3_busy; // @[rename-stage.scala:122:23] assign ren2_uops_1_prs3_busy = r_uop_1_prs3_busy; // @[rename-stage.scala:108:29, :122:23] 
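// -----------------------------------------------------------------------------
// (Descriptive note, added) The r_uop_1_* registers in this region are the
// lane-1 copy of the rename-1 -> rename-2 pipeline registers; like the lane-0
// r_uop_* bank above, they drive the ren2_uops_1_* outputs, and the
// next_uop_1_* nets are their next-state values.  The shared select
// _GEN = io_kill_0 | ~io_dis_ready_0 holds the registered micro-op (or zeros,
// for fields the front end does not supply) and otherwise accepts the incoming
// ren1_uops_1_* fields; the r_uop_newuop_* wires carry the selected value with
// resolved branches cleared from br_mask via ~io_brupdate_b1_resolve_mask, as
// in the lane-0 logic above.  A minimal sketch of the per-field pattern, with
// hypothetical names (hold, r_field, ren1_field, dis_ready are placeholders):
//   wire         hold       = kill | ~dis_ready;              // same role as _GEN
//   wire [W-1:0] next_field = hold ? r_field : ren1_field;    // hold or accept
//   always @(posedge clock) r_field <= next_field;            // pipeline register
// -----------------------------------------------------------------------------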
reg r_uop_1_ppred_busy; // @[rename-stage.scala:122:23] assign ren2_uops_1_ppred_busy = r_uop_1_ppred_busy; // @[rename-stage.scala:108:29, :122:23] reg [6:0] r_uop_1_stale_pdst; // @[rename-stage.scala:122:23] assign ren2_uops_1_stale_pdst = r_uop_1_stale_pdst; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_exception; // @[rename-stage.scala:122:23] assign ren2_uops_1_exception = r_uop_1_exception; // @[rename-stage.scala:108:29, :122:23] reg [63:0] r_uop_1_exc_cause; // @[rename-stage.scala:122:23] assign ren2_uops_1_exc_cause = r_uop_1_exc_cause; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_bypassable; // @[rename-stage.scala:122:23] assign ren2_uops_1_bypassable = r_uop_1_bypassable; // @[rename-stage.scala:108:29, :122:23] reg [4:0] r_uop_1_mem_cmd; // @[rename-stage.scala:122:23] assign ren2_uops_1_mem_cmd = r_uop_1_mem_cmd; // @[rename-stage.scala:108:29, :122:23] reg [1:0] r_uop_1_mem_size; // @[rename-stage.scala:122:23] assign ren2_uops_1_mem_size = r_uop_1_mem_size; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_mem_signed; // @[rename-stage.scala:122:23] assign ren2_uops_1_mem_signed = r_uop_1_mem_signed; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_is_fence; // @[rename-stage.scala:122:23] assign ren2_uops_1_is_fence = r_uop_1_is_fence; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_is_fencei; // @[rename-stage.scala:122:23] assign ren2_uops_1_is_fencei = r_uop_1_is_fencei; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_is_amo; // @[rename-stage.scala:122:23] assign ren2_uops_1_is_amo = r_uop_1_is_amo; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_uses_ldq; // @[rename-stage.scala:122:23] assign ren2_uops_1_uses_ldq = r_uop_1_uses_ldq; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_uses_stq; // @[rename-stage.scala:122:23] assign ren2_uops_1_uses_stq = r_uop_1_uses_stq; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_is_sys_pc2epc; // @[rename-stage.scala:122:23] assign ren2_uops_1_is_sys_pc2epc = r_uop_1_is_sys_pc2epc; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_is_unique; // @[rename-stage.scala:122:23] assign ren2_uops_1_is_unique = r_uop_1_is_unique; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_flush_on_commit; // @[rename-stage.scala:122:23] assign ren2_uops_1_flush_on_commit = r_uop_1_flush_on_commit; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_ldst_is_rs1; // @[rename-stage.scala:122:23] assign ren2_uops_1_ldst_is_rs1 = r_uop_1_ldst_is_rs1; // @[rename-stage.scala:108:29, :122:23] reg [5:0] r_uop_1_ldst; // @[rename-stage.scala:122:23] assign ren2_uops_1_ldst = r_uop_1_ldst; // @[rename-stage.scala:108:29, :122:23] reg [5:0] r_uop_1_lrs1; // @[rename-stage.scala:122:23] assign ren2_uops_1_lrs1 = r_uop_1_lrs1; // @[rename-stage.scala:108:29, :122:23] reg [5:0] r_uop_1_lrs2; // @[rename-stage.scala:122:23] assign ren2_uops_1_lrs2 = r_uop_1_lrs2; // @[rename-stage.scala:108:29, :122:23] reg [5:0] r_uop_1_lrs3; // @[rename-stage.scala:122:23] assign ren2_uops_1_lrs3 = r_uop_1_lrs3; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_ldst_val; // @[rename-stage.scala:122:23] assign ren2_uops_1_ldst_val = r_uop_1_ldst_val; // @[rename-stage.scala:108:29, :122:23] reg [1:0] r_uop_1_dst_rtype; // @[rename-stage.scala:122:23] assign ren2_uops_1_dst_rtype = r_uop_1_dst_rtype; // @[rename-stage.scala:108:29, :122:23] reg [1:0] r_uop_1_lrs1_rtype; // @[rename-stage.scala:122:23] assign ren2_uops_1_lrs1_rtype = r_uop_1_lrs1_rtype; // @[rename-stage.scala:108:29, :122:23] reg [1:0] 
r_uop_1_lrs2_rtype; // @[rename-stage.scala:122:23] assign ren2_uops_1_lrs2_rtype = r_uop_1_lrs2_rtype; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_frs3_en; // @[rename-stage.scala:122:23] assign ren2_uops_1_frs3_en = r_uop_1_frs3_en; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_fp_val; // @[rename-stage.scala:122:23] assign ren2_uops_1_fp_val = r_uop_1_fp_val; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_fp_single; // @[rename-stage.scala:122:23] assign ren2_uops_1_fp_single = r_uop_1_fp_single; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_xcpt_pf_if; // @[rename-stage.scala:122:23] assign ren2_uops_1_xcpt_pf_if = r_uop_1_xcpt_pf_if; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_xcpt_ae_if; // @[rename-stage.scala:122:23] assign ren2_uops_1_xcpt_ae_if = r_uop_1_xcpt_ae_if; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_xcpt_ma_if; // @[rename-stage.scala:122:23] assign ren2_uops_1_xcpt_ma_if = r_uop_1_xcpt_ma_if; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_bp_debug_if; // @[rename-stage.scala:122:23] assign ren2_uops_1_bp_debug_if = r_uop_1_bp_debug_if; // @[rename-stage.scala:108:29, :122:23] reg r_uop_1_bp_xcpt_if; // @[rename-stage.scala:122:23] assign ren2_uops_1_bp_xcpt_if = r_uop_1_bp_xcpt_if; // @[rename-stage.scala:108:29, :122:23] reg [1:0] r_uop_1_debug_fsrc; // @[rename-stage.scala:122:23] assign ren2_uops_1_debug_fsrc = r_uop_1_debug_fsrc; // @[rename-stage.scala:108:29, :122:23] reg [1:0] r_uop_1_debug_tsrc; // @[rename-stage.scala:122:23] assign ren2_uops_1_debug_tsrc = r_uop_1_debug_tsrc; // @[rename-stage.scala:108:29, :122:23] wire [6:0] r_uop_newuop_1_uopc = next_uop_1_uopc; // @[util.scala:73:26] wire [31:0] r_uop_newuop_1_inst = next_uop_1_inst; // @[util.scala:73:26] wire [31:0] r_uop_newuop_1_debug_inst = next_uop_1_debug_inst; // @[util.scala:73:26] wire r_uop_newuop_1_is_rvc = next_uop_1_is_rvc; // @[util.scala:73:26] wire [39:0] r_uop_newuop_1_debug_pc = next_uop_1_debug_pc; // @[util.scala:73:26] wire [2:0] r_uop_newuop_1_iq_type = next_uop_1_iq_type; // @[util.scala:73:26] wire [9:0] r_uop_newuop_1_fu_code = next_uop_1_fu_code; // @[util.scala:73:26] wire [3:0] r_uop_newuop_1_ctrl_br_type = next_uop_1_ctrl_br_type; // @[util.scala:73:26] wire [1:0] r_uop_newuop_1_ctrl_op1_sel = next_uop_1_ctrl_op1_sel; // @[util.scala:73:26] wire [2:0] r_uop_newuop_1_ctrl_op2_sel = next_uop_1_ctrl_op2_sel; // @[util.scala:73:26] wire [2:0] r_uop_newuop_1_ctrl_imm_sel = next_uop_1_ctrl_imm_sel; // @[util.scala:73:26] wire [4:0] r_uop_newuop_1_ctrl_op_fcn = next_uop_1_ctrl_op_fcn; // @[util.scala:73:26] wire r_uop_newuop_1_ctrl_fcn_dw = next_uop_1_ctrl_fcn_dw; // @[util.scala:73:26] wire [2:0] r_uop_newuop_1_ctrl_csr_cmd = next_uop_1_ctrl_csr_cmd; // @[util.scala:73:26] wire r_uop_newuop_1_ctrl_is_load = next_uop_1_ctrl_is_load; // @[util.scala:73:26] wire r_uop_newuop_1_ctrl_is_sta = next_uop_1_ctrl_is_sta; // @[util.scala:73:26] wire r_uop_newuop_1_ctrl_is_std = next_uop_1_ctrl_is_std; // @[util.scala:73:26] wire [1:0] r_uop_newuop_1_iw_state = next_uop_1_iw_state; // @[util.scala:73:26] wire r_uop_newuop_1_iw_p1_poisoned = next_uop_1_iw_p1_poisoned; // @[util.scala:73:26] wire r_uop_newuop_1_iw_p2_poisoned = next_uop_1_iw_p2_poisoned; // @[util.scala:73:26] wire r_uop_newuop_1_is_br = next_uop_1_is_br; // @[util.scala:73:26] wire r_uop_newuop_1_is_jalr = next_uop_1_is_jalr; // @[util.scala:73:26] wire r_uop_newuop_1_is_jal = next_uop_1_is_jal; // @[util.scala:73:26] wire r_uop_newuop_1_is_sfb = next_uop_1_is_sfb; // 
@[util.scala:73:26] wire [3:0] r_uop_newuop_1_br_tag = next_uop_1_br_tag; // @[util.scala:73:26] wire [4:0] r_uop_newuop_1_ftq_idx = next_uop_1_ftq_idx; // @[util.scala:73:26] wire r_uop_newuop_1_edge_inst = next_uop_1_edge_inst; // @[util.scala:73:26] wire [5:0] r_uop_newuop_1_pc_lob = next_uop_1_pc_lob; // @[util.scala:73:26] wire r_uop_newuop_1_taken = next_uop_1_taken; // @[util.scala:73:26] wire [19:0] r_uop_newuop_1_imm_packed = next_uop_1_imm_packed; // @[util.scala:73:26] wire [11:0] r_uop_newuop_1_csr_addr = next_uop_1_csr_addr; // @[util.scala:73:26] wire [6:0] r_uop_newuop_1_rob_idx = next_uop_1_rob_idx; // @[util.scala:73:26] wire [4:0] r_uop_newuop_1_ldq_idx = next_uop_1_ldq_idx; // @[util.scala:73:26] wire [4:0] r_uop_newuop_1_stq_idx = next_uop_1_stq_idx; // @[util.scala:73:26] wire [1:0] r_uop_newuop_1_rxq_idx = next_uop_1_rxq_idx; // @[util.scala:73:26] wire [6:0] r_uop_newuop_1_pdst = next_uop_1_pdst; // @[util.scala:73:26] wire [6:0] r_uop_newuop_1_prs1 = next_uop_1_prs1; // @[util.scala:73:26] wire [6:0] r_uop_newuop_1_prs2 = next_uop_1_prs2; // @[util.scala:73:26] wire [6:0] r_uop_newuop_1_prs3 = next_uop_1_prs3; // @[util.scala:73:26] wire [4:0] r_uop_newuop_1_ppred = next_uop_1_ppred; // @[util.scala:73:26] wire r_uop_newuop_1_prs1_busy = next_uop_1_prs1_busy; // @[util.scala:73:26] wire r_uop_newuop_1_prs2_busy = next_uop_1_prs2_busy; // @[util.scala:73:26] wire r_uop_newuop_1_prs3_busy = next_uop_1_prs3_busy; // @[util.scala:73:26] wire r_uop_newuop_1_ppred_busy = next_uop_1_ppred_busy; // @[util.scala:73:26] wire [6:0] r_uop_newuop_1_stale_pdst = next_uop_1_stale_pdst; // @[util.scala:73:26] wire r_uop_newuop_1_exception = next_uop_1_exception; // @[util.scala:73:26] wire [63:0] r_uop_newuop_1_exc_cause = next_uop_1_exc_cause; // @[util.scala:73:26] wire r_uop_newuop_1_bypassable = next_uop_1_bypassable; // @[util.scala:73:26] wire [4:0] r_uop_newuop_1_mem_cmd = next_uop_1_mem_cmd; // @[util.scala:73:26] wire [1:0] r_uop_newuop_1_mem_size = next_uop_1_mem_size; // @[util.scala:73:26] wire r_uop_newuop_1_mem_signed = next_uop_1_mem_signed; // @[util.scala:73:26] wire r_uop_newuop_1_is_fence = next_uop_1_is_fence; // @[util.scala:73:26] wire r_uop_newuop_1_is_fencei = next_uop_1_is_fencei; // @[util.scala:73:26] wire r_uop_newuop_1_is_amo = next_uop_1_is_amo; // @[util.scala:73:26] wire r_uop_newuop_1_uses_ldq = next_uop_1_uses_ldq; // @[util.scala:73:26] wire r_uop_newuop_1_uses_stq = next_uop_1_uses_stq; // @[util.scala:73:26] wire r_uop_newuop_1_is_sys_pc2epc = next_uop_1_is_sys_pc2epc; // @[util.scala:73:26] wire r_uop_newuop_1_is_unique = next_uop_1_is_unique; // @[util.scala:73:26] wire r_uop_newuop_1_flush_on_commit = next_uop_1_flush_on_commit; // @[util.scala:73:26] wire r_uop_newuop_1_ldst_is_rs1 = next_uop_1_ldst_is_rs1; // @[util.scala:73:26] wire [5:0] r_uop_newuop_1_ldst = next_uop_1_ldst; // @[util.scala:73:26] wire [5:0] r_uop_newuop_1_lrs1 = next_uop_1_lrs1; // @[util.scala:73:26] wire [5:0] r_uop_newuop_1_lrs2 = next_uop_1_lrs2; // @[util.scala:73:26] wire [5:0] r_uop_newuop_1_lrs3 = next_uop_1_lrs3; // @[util.scala:73:26] wire r_uop_newuop_1_ldst_val = next_uop_1_ldst_val; // @[util.scala:73:26] wire [1:0] r_uop_newuop_1_dst_rtype = next_uop_1_dst_rtype; // @[util.scala:73:26] wire [1:0] r_uop_newuop_1_lrs1_rtype = next_uop_1_lrs1_rtype; // @[util.scala:73:26] wire [1:0] r_uop_newuop_1_lrs2_rtype = next_uop_1_lrs2_rtype; // @[util.scala:73:26] wire r_uop_newuop_1_frs3_en = next_uop_1_frs3_en; // @[util.scala:73:26] wire r_uop_newuop_1_fp_val = 
next_uop_1_fp_val; // @[util.scala:73:26] wire r_uop_newuop_1_fp_single = next_uop_1_fp_single; // @[util.scala:73:26] wire r_uop_newuop_1_xcpt_pf_if = next_uop_1_xcpt_pf_if; // @[util.scala:73:26] wire r_uop_newuop_1_xcpt_ae_if = next_uop_1_xcpt_ae_if; // @[util.scala:73:26] wire r_uop_newuop_1_xcpt_ma_if = next_uop_1_xcpt_ma_if; // @[util.scala:73:26] wire r_uop_newuop_1_bp_debug_if = next_uop_1_bp_debug_if; // @[util.scala:73:26] wire r_uop_newuop_1_bp_xcpt_if = next_uop_1_bp_xcpt_if; // @[util.scala:73:26] wire [1:0] r_uop_newuop_1_debug_fsrc = next_uop_1_debug_fsrc; // @[util.scala:73:26] wire [1:0] r_uop_newuop_1_debug_tsrc = next_uop_1_debug_tsrc; // @[util.scala:73:26] wire [15:0] next_uop_1_br_mask; // @[rename-stage.scala:123:24] wire _r_valid_T_2 = ~io_dis_fire_1_0; // @[rename-stage.scala:133:29, :356:7] wire _r_valid_T_3 = r_valid_1 & _r_valid_T_2; // @[rename-stage.scala:121:27, :133:{26,29}] assign next_uop_1_uopc = _GEN ? r_uop_1_uopc : ren1_uops_1_uopc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_inst = _GEN ? r_uop_1_inst : ren1_uops_1_inst; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_debug_inst = _GEN ? r_uop_1_debug_inst : ren1_uops_1_debug_inst; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_is_rvc = _GEN ? r_uop_1_is_rvc : ren1_uops_1_is_rvc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_debug_pc = _GEN ? r_uop_1_debug_pc : ren1_uops_1_debug_pc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_iq_type = _GEN ? r_uop_1_iq_type : ren1_uops_1_iq_type; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_fu_code = _GEN ? r_uop_1_fu_code : ren1_uops_1_fu_code; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_ctrl_br_type = _GEN ? r_uop_1_ctrl_br_type : 4'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_ctrl_op1_sel = _GEN ? r_uop_1_ctrl_op1_sel : 2'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_ctrl_op2_sel = _GEN ? r_uop_1_ctrl_op2_sel : 3'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_ctrl_imm_sel = _GEN ? r_uop_1_ctrl_imm_sel : 3'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_ctrl_op_fcn = _GEN ? r_uop_1_ctrl_op_fcn : 5'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_ctrl_fcn_dw = _GEN & r_uop_1_ctrl_fcn_dw; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_ctrl_csr_cmd = _GEN ? r_uop_1_ctrl_csr_cmd : 3'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_ctrl_is_load = _GEN & r_uop_1_ctrl_is_load; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_ctrl_is_sta = _GEN & r_uop_1_ctrl_is_sta; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_ctrl_is_std = _GEN & r_uop_1_ctrl_is_std; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_iw_state = _GEN ? 
r_uop_1_iw_state : 2'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_iw_p1_poisoned = _GEN & r_uop_1_iw_p1_poisoned; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_iw_p2_poisoned = _GEN & r_uop_1_iw_p2_poisoned; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_is_br = _GEN ? r_uop_1_is_br : ren1_uops_1_is_br; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_is_jalr = _GEN ? r_uop_1_is_jalr : ren1_uops_1_is_jalr; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_is_jal = _GEN ? r_uop_1_is_jal : ren1_uops_1_is_jal; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_is_sfb = _GEN ? r_uop_1_is_sfb : ren1_uops_1_is_sfb; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_br_mask = _GEN ? r_uop_1_br_mask : ren1_uops_1_br_mask; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_br_tag = _GEN ? r_uop_1_br_tag : ren1_uops_1_br_tag; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_ftq_idx = _GEN ? r_uop_1_ftq_idx : ren1_uops_1_ftq_idx; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_edge_inst = _GEN ? r_uop_1_edge_inst : ren1_uops_1_edge_inst; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_pc_lob = _GEN ? r_uop_1_pc_lob : ren1_uops_1_pc_lob; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_taken = _GEN ? r_uop_1_taken : ren1_uops_1_taken; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_imm_packed = _GEN ? r_uop_1_imm_packed : ren1_uops_1_imm_packed; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_csr_addr = _GEN ? r_uop_1_csr_addr : 12'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_rob_idx = _GEN ? r_uop_1_rob_idx : 7'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_ldq_idx = _GEN ? r_uop_1_ldq_idx : 5'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_stq_idx = _GEN ? r_uop_1_stq_idx : 5'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_rxq_idx = _GEN ? r_uop_1_rxq_idx : 2'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_pdst = _GEN ? r_uop_1_pdst : 7'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_prs1 = _GEN ? r_uop_1_prs1 : 7'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_prs2 = _GEN ? r_uop_1_prs2 : 7'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_prs3 = _GEN ? r_uop_1_prs3 : 7'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_ppred = _GEN ? 
r_uop_1_ppred : 5'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_prs1_busy = _GEN & r_uop_1_prs1_busy; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_prs2_busy = _GEN & r_uop_1_prs2_busy; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_prs3_busy = _GEN & r_uop_1_prs3_busy; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_ppred_busy = _GEN & r_uop_1_ppred_busy; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_stale_pdst = _GEN ? r_uop_1_stale_pdst : 7'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_exception = _GEN ? r_uop_1_exception : ren1_uops_1_exception; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_exc_cause = _GEN ? r_uop_1_exc_cause : ren1_uops_1_exc_cause; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_bypassable = _GEN ? r_uop_1_bypassable : ren1_uops_1_bypassable; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_mem_cmd = _GEN ? r_uop_1_mem_cmd : ren1_uops_1_mem_cmd; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_mem_size = _GEN ? r_uop_1_mem_size : ren1_uops_1_mem_size; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_mem_signed = _GEN ? r_uop_1_mem_signed : ren1_uops_1_mem_signed; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_is_fence = _GEN ? r_uop_1_is_fence : ren1_uops_1_is_fence; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_is_fencei = _GEN ? r_uop_1_is_fencei : ren1_uops_1_is_fencei; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_is_amo = _GEN ? r_uop_1_is_amo : ren1_uops_1_is_amo; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_uses_ldq = _GEN ? r_uop_1_uses_ldq : ren1_uops_1_uses_ldq; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_uses_stq = _GEN ? r_uop_1_uses_stq : ren1_uops_1_uses_stq; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_is_sys_pc2epc = _GEN ? r_uop_1_is_sys_pc2epc : ren1_uops_1_is_sys_pc2epc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_is_unique = _GEN ? r_uop_1_is_unique : ren1_uops_1_is_unique; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_flush_on_commit = _GEN ? r_uop_1_flush_on_commit : ren1_uops_1_flush_on_commit; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_ldst_is_rs1 = _GEN & r_uop_1_ldst_is_rs1; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_ldst = _GEN ? r_uop_1_ldst : ren1_uops_1_ldst; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_lrs1 = _GEN ? r_uop_1_lrs1 : ren1_uops_1_lrs1; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_lrs2 = _GEN ? r_uop_1_lrs2 : ren1_uops_1_lrs2; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_lrs3 = _GEN ? 
r_uop_1_lrs3 : ren1_uops_1_lrs3; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_ldst_val = _GEN ? r_uop_1_ldst_val : ren1_uops_1_ldst_val; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_dst_rtype = _GEN ? r_uop_1_dst_rtype : ren1_uops_1_dst_rtype; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_lrs1_rtype = _GEN ? r_uop_1_lrs1_rtype : ren1_uops_1_lrs1_rtype; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_lrs2_rtype = _GEN ? r_uop_1_lrs2_rtype : ren1_uops_1_lrs2_rtype; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_frs3_en = _GEN ? r_uop_1_frs3_en : ren1_uops_1_frs3_en; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_fp_val = _GEN ? r_uop_1_fp_val : ren1_uops_1_fp_val; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_fp_single = _GEN ? r_uop_1_fp_single : ren1_uops_1_fp_single; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_xcpt_pf_if = _GEN ? r_uop_1_xcpt_pf_if : ren1_uops_1_xcpt_pf_if; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_xcpt_ae_if = _GEN ? r_uop_1_xcpt_ae_if : ren1_uops_1_xcpt_ae_if; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_xcpt_ma_if = _GEN & r_uop_1_xcpt_ma_if; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_bp_debug_if = _GEN ? r_uop_1_bp_debug_if : ren1_uops_1_bp_debug_if; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_bp_xcpt_if = _GEN ? r_uop_1_bp_xcpt_if : ren1_uops_1_bp_xcpt_if; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_debug_fsrc = _GEN ? r_uop_1_debug_fsrc : ren1_uops_1_debug_fsrc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_1_debug_tsrc = _GEN ? 
r_uop_1_debug_tsrc : 2'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] wire [15:0] _r_uop_newuop_br_mask_T_3; // @[util.scala:74:35] wire [15:0] r_uop_newuop_1_br_mask; // @[util.scala:73:26] wire [15:0] _r_uop_newuop_br_mask_T_2 = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:74:37] assign _r_uop_newuop_br_mask_T_3 = next_uop_1_br_mask & _r_uop_newuop_br_mask_T_2; // @[util.scala:74:{35,37}] assign r_uop_newuop_1_br_mask = _r_uop_newuop_br_mask_T_3; // @[util.scala:73:26, :74:35] reg r_valid_2; // @[rename-stage.scala:121:27] assign ren2_valids_2 = r_valid_2; // @[rename-stage.scala:107:29, :121:27] reg [6:0] r_uop_2_uopc; // @[rename-stage.scala:122:23] assign ren2_uops_2_uopc = r_uop_2_uopc; // @[rename-stage.scala:108:29, :122:23] reg [31:0] r_uop_2_inst; // @[rename-stage.scala:122:23] assign ren2_uops_2_inst = r_uop_2_inst; // @[rename-stage.scala:108:29, :122:23] reg [31:0] r_uop_2_debug_inst; // @[rename-stage.scala:122:23] assign ren2_uops_2_debug_inst = r_uop_2_debug_inst; // @[rename-stage.scala:108:29, :122:23] reg r_uop_2_is_rvc; // @[rename-stage.scala:122:23] assign ren2_uops_2_is_rvc = r_uop_2_is_rvc; // @[rename-stage.scala:108:29, :122:23] reg [39:0] r_uop_2_debug_pc; // @[rename-stage.scala:122:23] assign ren2_uops_2_debug_pc = r_uop_2_debug_pc; // @[rename-stage.scala:108:29, :122:23] reg [2:0] r_uop_2_iq_type; // @[rename-stage.scala:122:23] assign ren2_uops_2_iq_type = r_uop_2_iq_type; // @[rename-stage.scala:108:29, :122:23] reg [9:0] r_uop_2_fu_code; // @[rename-stage.scala:122:23] assign ren2_uops_2_fu_code = r_uop_2_fu_code; // @[rename-stage.scala:108:29, :122:23] reg [3:0] r_uop_2_ctrl_br_type; // @[rename-stage.scala:122:23] assign ren2_uops_2_ctrl_br_type = r_uop_2_ctrl_br_type; // @[rename-stage.scala:108:29, :122:23] reg [1:0] r_uop_2_ctrl_op1_sel; // @[rename-stage.scala:122:23] assign ren2_uops_2_ctrl_op1_sel = r_uop_2_ctrl_op1_sel; // @[rename-stage.scala:108:29, :122:23] reg [2:0] r_uop_2_ctrl_op2_sel; // @[rename-stage.scala:122:23] assign ren2_uops_2_ctrl_op2_sel = r_uop_2_ctrl_op2_sel; // @[rename-stage.scala:108:29, :122:23] reg [2:0] r_uop_2_ctrl_imm_sel; // @[rename-stage.scala:122:23] assign ren2_uops_2_ctrl_imm_sel = r_uop_2_ctrl_imm_sel; // @[rename-stage.scala:108:29, :122:23] reg [4:0] r_uop_2_ctrl_op_fcn; // @[rename-stage.scala:122:23] assign ren2_uops_2_ctrl_op_fcn = r_uop_2_ctrl_op_fcn; // @[rename-stage.scala:108:29, :122:23] reg r_uop_2_ctrl_fcn_dw; // @[rename-stage.scala:122:23] assign ren2_uops_2_ctrl_fcn_dw = r_uop_2_ctrl_fcn_dw; // @[rename-stage.scala:108:29, :122:23] reg [2:0] r_uop_2_ctrl_csr_cmd; // @[rename-stage.scala:122:23] assign ren2_uops_2_ctrl_csr_cmd = r_uop_2_ctrl_csr_cmd; // @[rename-stage.scala:108:29, :122:23] reg r_uop_2_ctrl_is_load; // @[rename-stage.scala:122:23] assign ren2_uops_2_ctrl_is_load = r_uop_2_ctrl_is_load; // @[rename-stage.scala:108:29, :122:23] reg r_uop_2_ctrl_is_sta; // @[rename-stage.scala:122:23] assign ren2_uops_2_ctrl_is_sta = r_uop_2_ctrl_is_sta; // @[rename-stage.scala:108:29, :122:23] reg r_uop_2_ctrl_is_std; // @[rename-stage.scala:122:23] assign ren2_uops_2_ctrl_is_std = r_uop_2_ctrl_is_std; // @[rename-stage.scala:108:29, :122:23] reg [1:0] r_uop_2_iw_state; // @[rename-stage.scala:122:23] assign ren2_uops_2_iw_state = r_uop_2_iw_state; // @[rename-stage.scala:108:29, :122:23] reg r_uop_2_iw_p1_poisoned; // @[rename-stage.scala:122:23] assign ren2_uops_2_iw_p1_poisoned = r_uop_2_iw_p1_poisoned; // @[rename-stage.scala:108:29, :122:23] reg 
r_uop_2_iw_p2_poisoned; // @[rename-stage.scala:122:23] assign ren2_uops_2_iw_p2_poisoned = r_uop_2_iw_p2_poisoned; // @[rename-stage.scala:108:29, :122:23] reg r_uop_2_is_br; // @[rename-stage.scala:122:23] assign ren2_uops_2_is_br = r_uop_2_is_br; // @[rename-stage.scala:108:29, :122:23] reg r_uop_2_is_jalr; // @[rename-stage.scala:122:23] assign ren2_uops_2_is_jalr = r_uop_2_is_jalr; // @[rename-stage.scala:108:29, :122:23] reg r_uop_2_is_jal; // @[rename-stage.scala:122:23] assign ren2_uops_2_is_jal = r_uop_2_is_jal; // @[rename-stage.scala:108:29, :122:23] reg r_uop_2_is_sfb; // @[rename-stage.scala:122:23] assign ren2_uops_2_is_sfb = r_uop_2_is_sfb; // @[rename-stage.scala:108:29, :122:23] reg [15:0] r_uop_2_br_mask; // @[rename-stage.scala:122:23] assign ren2_uops_2_br_mask = r_uop_2_br_mask; // @[rename-stage.scala:108:29, :122:23] reg [3:0] r_uop_2_br_tag; // @[rename-stage.scala:122:23] assign ren2_uops_2_br_tag = r_uop_2_br_tag; // @[rename-stage.scala:108:29, :122:23] reg [4:0] r_uop_2_ftq_idx; // @[rename-stage.scala:122:23] assign ren2_uops_2_ftq_idx = r_uop_2_ftq_idx; // @[rename-stage.scala:108:29, :122:23] reg r_uop_2_edge_inst; // @[rename-stage.scala:122:23] assign ren2_uops_2_edge_inst = r_uop_2_edge_inst; // @[rename-stage.scala:108:29, :122:23] reg [5:0] r_uop_2_pc_lob; // @[rename-stage.scala:122:23] assign ren2_uops_2_pc_lob = r_uop_2_pc_lob; // @[rename-stage.scala:108:29, :122:23] reg r_uop_2_taken; // @[rename-stage.scala:122:23] assign ren2_uops_2_taken = r_uop_2_taken; // @[rename-stage.scala:108:29, :122:23] reg [19:0] r_uop_2_imm_packed; // @[rename-stage.scala:122:23] assign ren2_uops_2_imm_packed = r_uop_2_imm_packed; // @[rename-stage.scala:108:29, :122:23] reg [11:0] r_uop_2_csr_addr; // @[rename-stage.scala:122:23] assign ren2_uops_2_csr_addr = r_uop_2_csr_addr; // @[rename-stage.scala:108:29, :122:23] reg [6:0] r_uop_2_rob_idx; // @[rename-stage.scala:122:23] assign ren2_uops_2_rob_idx = r_uop_2_rob_idx; // @[rename-stage.scala:108:29, :122:23] reg [4:0] r_uop_2_ldq_idx; // @[rename-stage.scala:122:23] assign ren2_uops_2_ldq_idx = r_uop_2_ldq_idx; // @[rename-stage.scala:108:29, :122:23] reg [4:0] r_uop_2_stq_idx; // @[rename-stage.scala:122:23] assign ren2_uops_2_stq_idx = r_uop_2_stq_idx; // @[rename-stage.scala:108:29, :122:23] reg [1:0] r_uop_2_rxq_idx; // @[rename-stage.scala:122:23] assign ren2_uops_2_rxq_idx = r_uop_2_rxq_idx; // @[rename-stage.scala:108:29, :122:23] reg [6:0] r_uop_2_pdst; // @[rename-stage.scala:122:23] assign ren2_uops_2_pdst = r_uop_2_pdst; // @[rename-stage.scala:108:29, :122:23] reg [6:0] r_uop_2_prs1; // @[rename-stage.scala:122:23] assign ren2_uops_2_prs1 = r_uop_2_prs1; // @[rename-stage.scala:108:29, :122:23] reg [6:0] r_uop_2_prs2; // @[rename-stage.scala:122:23] assign ren2_uops_2_prs2 = r_uop_2_prs2; // @[rename-stage.scala:108:29, :122:23] reg [6:0] r_uop_2_prs3; // @[rename-stage.scala:122:23] assign ren2_uops_2_prs3 = r_uop_2_prs3; // @[rename-stage.scala:108:29, :122:23] reg [4:0] r_uop_2_ppred; // @[rename-stage.scala:122:23] assign ren2_uops_2_ppred = r_uop_2_ppred; // @[rename-stage.scala:108:29, :122:23] reg r_uop_2_prs1_busy; // @[rename-stage.scala:122:23] assign ren2_uops_2_prs1_busy = r_uop_2_prs1_busy; // @[rename-stage.scala:108:29, :122:23] reg r_uop_2_prs2_busy; // @[rename-stage.scala:122:23] assign ren2_uops_2_prs2_busy = r_uop_2_prs2_busy; // @[rename-stage.scala:108:29, :122:23] reg r_uop_2_prs3_busy; // @[rename-stage.scala:122:23] assign ren2_uops_2_prs3_busy = r_uop_2_prs3_busy; // 
@[rename-stage.scala:108:29, :122:23] reg r_uop_2_ppred_busy; // @[rename-stage.scala:122:23] assign ren2_uops_2_ppred_busy = r_uop_2_ppred_busy; // @[rename-stage.scala:108:29, :122:23] reg [6:0] r_uop_2_stale_pdst; // @[rename-stage.scala:122:23] assign ren2_uops_2_stale_pdst = r_uop_2_stale_pdst; // @[rename-stage.scala:108:29, :122:23] reg r_uop_2_exception; // @[rename-stage.scala:122:23] assign ren2_uops_2_exception = r_uop_2_exception; // @[rename-stage.scala:108:29, :122:23] reg [63:0] r_uop_2_exc_cause; // @[rename-stage.scala:122:23] assign ren2_uops_2_exc_cause = r_uop_2_exc_cause; // @[rename-stage.scala:108:29, :122:23] reg r_uop_2_bypassable; // @[rename-stage.scala:122:23] assign ren2_uops_2_bypassable = r_uop_2_bypassable; // @[rename-stage.scala:108:29, :122:23] reg [4:0] r_uop_2_mem_cmd; // @[rename-stage.scala:122:23] assign ren2_uops_2_mem_cmd = r_uop_2_mem_cmd; // @[rename-stage.scala:108:29, :122:23] reg [1:0] r_uop_2_mem_size; // @[rename-stage.scala:122:23] assign ren2_uops_2_mem_size = r_uop_2_mem_size; // @[rename-stage.scala:108:29, :122:23] reg r_uop_2_mem_signed; // @[rename-stage.scala:122:23] assign ren2_uops_2_mem_signed = r_uop_2_mem_signed; // @[rename-stage.scala:108:29, :122:23] reg r_uop_2_is_fence; // @[rename-stage.scala:122:23] assign ren2_uops_2_is_fence = r_uop_2_is_fence; // @[rename-stage.scala:108:29, :122:23] reg r_uop_2_is_fencei; // @[rename-stage.scala:122:23] assign ren2_uops_2_is_fencei = r_uop_2_is_fencei; // @[rename-stage.scala:108:29, :122:23] reg r_uop_2_is_amo; // @[rename-stage.scala:122:23] assign ren2_uops_2_is_amo = r_uop_2_is_amo; // @[rename-stage.scala:108:29, :122:23] reg r_uop_2_uses_ldq; // @[rename-stage.scala:122:23] assign ren2_uops_2_uses_ldq = r_uop_2_uses_ldq; // @[rename-stage.scala:108:29, :122:23] reg r_uop_2_uses_stq; // @[rename-stage.scala:122:23] assign ren2_uops_2_uses_stq = r_uop_2_uses_stq; // @[rename-stage.scala:108:29, :122:23] reg r_uop_2_is_sys_pc2epc; // @[rename-stage.scala:122:23] assign ren2_uops_2_is_sys_pc2epc = r_uop_2_is_sys_pc2epc; // @[rename-stage.scala:108:29, :122:23] reg r_uop_2_is_unique; // @[rename-stage.scala:122:23] assign ren2_uops_2_is_unique = r_uop_2_is_unique; // @[rename-stage.scala:108:29, :122:23] reg r_uop_2_flush_on_commit; // @[rename-stage.scala:122:23] assign ren2_uops_2_flush_on_commit = r_uop_2_flush_on_commit; // @[rename-stage.scala:108:29, :122:23] reg r_uop_2_ldst_is_rs1; // @[rename-stage.scala:122:23] assign ren2_uops_2_ldst_is_rs1 = r_uop_2_ldst_is_rs1; // @[rename-stage.scala:108:29, :122:23] reg [5:0] r_uop_2_ldst; // @[rename-stage.scala:122:23] assign ren2_uops_2_ldst = r_uop_2_ldst; // @[rename-stage.scala:108:29, :122:23] reg [5:0] r_uop_2_lrs1; // @[rename-stage.scala:122:23] assign ren2_uops_2_lrs1 = r_uop_2_lrs1; // @[rename-stage.scala:108:29, :122:23] reg [5:0] r_uop_2_lrs2; // @[rename-stage.scala:122:23] assign ren2_uops_2_lrs2 = r_uop_2_lrs2; // @[rename-stage.scala:108:29, :122:23] reg [5:0] r_uop_2_lrs3; // @[rename-stage.scala:122:23] assign ren2_uops_2_lrs3 = r_uop_2_lrs3; // @[rename-stage.scala:108:29, :122:23] reg r_uop_2_ldst_val; // @[rename-stage.scala:122:23] assign ren2_uops_2_ldst_val = r_uop_2_ldst_val; // @[rename-stage.scala:108:29, :122:23] reg [1:0] r_uop_2_dst_rtype; // @[rename-stage.scala:122:23] assign ren2_uops_2_dst_rtype = r_uop_2_dst_rtype; // @[rename-stage.scala:108:29, :122:23] reg [1:0] r_uop_2_lrs1_rtype; // @[rename-stage.scala:122:23] assign ren2_uops_2_lrs1_rtype = r_uop_2_lrs1_rtype; // @[rename-stage.scala:108:29, 
:122:23] reg [1:0] r_uop_2_lrs2_rtype; // @[rename-stage.scala:122:23] assign ren2_uops_2_lrs2_rtype = r_uop_2_lrs2_rtype; // @[rename-stage.scala:108:29, :122:23] reg r_uop_2_frs3_en; // @[rename-stage.scala:122:23] assign ren2_uops_2_frs3_en = r_uop_2_frs3_en; // @[rename-stage.scala:108:29, :122:23] reg r_uop_2_fp_val; // @[rename-stage.scala:122:23] assign ren2_uops_2_fp_val = r_uop_2_fp_val; // @[rename-stage.scala:108:29, :122:23] reg r_uop_2_fp_single; // @[rename-stage.scala:122:23] assign ren2_uops_2_fp_single = r_uop_2_fp_single; // @[rename-stage.scala:108:29, :122:23] reg r_uop_2_xcpt_pf_if; // @[rename-stage.scala:122:23] assign ren2_uops_2_xcpt_pf_if = r_uop_2_xcpt_pf_if; // @[rename-stage.scala:108:29, :122:23] reg r_uop_2_xcpt_ae_if; // @[rename-stage.scala:122:23] assign ren2_uops_2_xcpt_ae_if = r_uop_2_xcpt_ae_if; // @[rename-stage.scala:108:29, :122:23] reg r_uop_2_xcpt_ma_if; // @[rename-stage.scala:122:23] assign ren2_uops_2_xcpt_ma_if = r_uop_2_xcpt_ma_if; // @[rename-stage.scala:108:29, :122:23] reg r_uop_2_bp_debug_if; // @[rename-stage.scala:122:23] assign ren2_uops_2_bp_debug_if = r_uop_2_bp_debug_if; // @[rename-stage.scala:108:29, :122:23] reg r_uop_2_bp_xcpt_if; // @[rename-stage.scala:122:23] assign ren2_uops_2_bp_xcpt_if = r_uop_2_bp_xcpt_if; // @[rename-stage.scala:108:29, :122:23] reg [1:0] r_uop_2_debug_fsrc; // @[rename-stage.scala:122:23] assign ren2_uops_2_debug_fsrc = r_uop_2_debug_fsrc; // @[rename-stage.scala:108:29, :122:23] reg [1:0] r_uop_2_debug_tsrc; // @[rename-stage.scala:122:23] assign ren2_uops_2_debug_tsrc = r_uop_2_debug_tsrc; // @[rename-stage.scala:108:29, :122:23] wire [6:0] r_uop_newuop_2_uopc = next_uop_2_uopc; // @[util.scala:73:26] wire [31:0] r_uop_newuop_2_inst = next_uop_2_inst; // @[util.scala:73:26] wire [31:0] r_uop_newuop_2_debug_inst = next_uop_2_debug_inst; // @[util.scala:73:26] wire r_uop_newuop_2_is_rvc = next_uop_2_is_rvc; // @[util.scala:73:26] wire [39:0] r_uop_newuop_2_debug_pc = next_uop_2_debug_pc; // @[util.scala:73:26] wire [2:0] r_uop_newuop_2_iq_type = next_uop_2_iq_type; // @[util.scala:73:26] wire [9:0] r_uop_newuop_2_fu_code = next_uop_2_fu_code; // @[util.scala:73:26] wire [3:0] r_uop_newuop_2_ctrl_br_type = next_uop_2_ctrl_br_type; // @[util.scala:73:26] wire [1:0] r_uop_newuop_2_ctrl_op1_sel = next_uop_2_ctrl_op1_sel; // @[util.scala:73:26] wire [2:0] r_uop_newuop_2_ctrl_op2_sel = next_uop_2_ctrl_op2_sel; // @[util.scala:73:26] wire [2:0] r_uop_newuop_2_ctrl_imm_sel = next_uop_2_ctrl_imm_sel; // @[util.scala:73:26] wire [4:0] r_uop_newuop_2_ctrl_op_fcn = next_uop_2_ctrl_op_fcn; // @[util.scala:73:26] wire r_uop_newuop_2_ctrl_fcn_dw = next_uop_2_ctrl_fcn_dw; // @[util.scala:73:26] wire [2:0] r_uop_newuop_2_ctrl_csr_cmd = next_uop_2_ctrl_csr_cmd; // @[util.scala:73:26] wire r_uop_newuop_2_ctrl_is_load = next_uop_2_ctrl_is_load; // @[util.scala:73:26] wire r_uop_newuop_2_ctrl_is_sta = next_uop_2_ctrl_is_sta; // @[util.scala:73:26] wire r_uop_newuop_2_ctrl_is_std = next_uop_2_ctrl_is_std; // @[util.scala:73:26] wire [1:0] r_uop_newuop_2_iw_state = next_uop_2_iw_state; // @[util.scala:73:26] wire r_uop_newuop_2_iw_p1_poisoned = next_uop_2_iw_p1_poisoned; // @[util.scala:73:26] wire r_uop_newuop_2_iw_p2_poisoned = next_uop_2_iw_p2_poisoned; // @[util.scala:73:26] wire r_uop_newuop_2_is_br = next_uop_2_is_br; // @[util.scala:73:26] wire r_uop_newuop_2_is_jalr = next_uop_2_is_jalr; // @[util.scala:73:26] wire r_uop_newuop_2_is_jal = next_uop_2_is_jal; // @[util.scala:73:26] wire r_uop_newuop_2_is_sfb = 
next_uop_2_is_sfb; // @[util.scala:73:26] wire [3:0] r_uop_newuop_2_br_tag = next_uop_2_br_tag; // @[util.scala:73:26] wire [4:0] r_uop_newuop_2_ftq_idx = next_uop_2_ftq_idx; // @[util.scala:73:26] wire r_uop_newuop_2_edge_inst = next_uop_2_edge_inst; // @[util.scala:73:26] wire [5:0] r_uop_newuop_2_pc_lob = next_uop_2_pc_lob; // @[util.scala:73:26] wire r_uop_newuop_2_taken = next_uop_2_taken; // @[util.scala:73:26] wire [19:0] r_uop_newuop_2_imm_packed = next_uop_2_imm_packed; // @[util.scala:73:26] wire [11:0] r_uop_newuop_2_csr_addr = next_uop_2_csr_addr; // @[util.scala:73:26] wire [6:0] r_uop_newuop_2_rob_idx = next_uop_2_rob_idx; // @[util.scala:73:26] wire [4:0] r_uop_newuop_2_ldq_idx = next_uop_2_ldq_idx; // @[util.scala:73:26] wire [4:0] r_uop_newuop_2_stq_idx = next_uop_2_stq_idx; // @[util.scala:73:26] wire [1:0] r_uop_newuop_2_rxq_idx = next_uop_2_rxq_idx; // @[util.scala:73:26] wire [6:0] r_uop_newuop_2_pdst = next_uop_2_pdst; // @[util.scala:73:26] wire [6:0] r_uop_newuop_2_prs1 = next_uop_2_prs1; // @[util.scala:73:26] wire [6:0] r_uop_newuop_2_prs2 = next_uop_2_prs2; // @[util.scala:73:26] wire [6:0] r_uop_newuop_2_prs3 = next_uop_2_prs3; // @[util.scala:73:26] wire [4:0] r_uop_newuop_2_ppred = next_uop_2_ppred; // @[util.scala:73:26] wire r_uop_newuop_2_prs1_busy = next_uop_2_prs1_busy; // @[util.scala:73:26] wire r_uop_newuop_2_prs2_busy = next_uop_2_prs2_busy; // @[util.scala:73:26] wire r_uop_newuop_2_prs3_busy = next_uop_2_prs3_busy; // @[util.scala:73:26] wire r_uop_newuop_2_ppred_busy = next_uop_2_ppred_busy; // @[util.scala:73:26] wire [6:0] r_uop_newuop_2_stale_pdst = next_uop_2_stale_pdst; // @[util.scala:73:26] wire r_uop_newuop_2_exception = next_uop_2_exception; // @[util.scala:73:26] wire [63:0] r_uop_newuop_2_exc_cause = next_uop_2_exc_cause; // @[util.scala:73:26] wire r_uop_newuop_2_bypassable = next_uop_2_bypassable; // @[util.scala:73:26] wire [4:0] r_uop_newuop_2_mem_cmd = next_uop_2_mem_cmd; // @[util.scala:73:26] wire [1:0] r_uop_newuop_2_mem_size = next_uop_2_mem_size; // @[util.scala:73:26] wire r_uop_newuop_2_mem_signed = next_uop_2_mem_signed; // @[util.scala:73:26] wire r_uop_newuop_2_is_fence = next_uop_2_is_fence; // @[util.scala:73:26] wire r_uop_newuop_2_is_fencei = next_uop_2_is_fencei; // @[util.scala:73:26] wire r_uop_newuop_2_is_amo = next_uop_2_is_amo; // @[util.scala:73:26] wire r_uop_newuop_2_uses_ldq = next_uop_2_uses_ldq; // @[util.scala:73:26] wire r_uop_newuop_2_uses_stq = next_uop_2_uses_stq; // @[util.scala:73:26] wire r_uop_newuop_2_is_sys_pc2epc = next_uop_2_is_sys_pc2epc; // @[util.scala:73:26] wire r_uop_newuop_2_is_unique = next_uop_2_is_unique; // @[util.scala:73:26] wire r_uop_newuop_2_flush_on_commit = next_uop_2_flush_on_commit; // @[util.scala:73:26] wire r_uop_newuop_2_ldst_is_rs1 = next_uop_2_ldst_is_rs1; // @[util.scala:73:26] wire [5:0] r_uop_newuop_2_ldst = next_uop_2_ldst; // @[util.scala:73:26] wire [5:0] r_uop_newuop_2_lrs1 = next_uop_2_lrs1; // @[util.scala:73:26] wire [5:0] r_uop_newuop_2_lrs2 = next_uop_2_lrs2; // @[util.scala:73:26] wire [5:0] r_uop_newuop_2_lrs3 = next_uop_2_lrs3; // @[util.scala:73:26] wire r_uop_newuop_2_ldst_val = next_uop_2_ldst_val; // @[util.scala:73:26] wire [1:0] r_uop_newuop_2_dst_rtype = next_uop_2_dst_rtype; // @[util.scala:73:26] wire [1:0] r_uop_newuop_2_lrs1_rtype = next_uop_2_lrs1_rtype; // @[util.scala:73:26] wire [1:0] r_uop_newuop_2_lrs2_rtype = next_uop_2_lrs2_rtype; // @[util.scala:73:26] wire r_uop_newuop_2_frs3_en = next_uop_2_frs3_en; // @[util.scala:73:26] wire 
r_uop_newuop_2_fp_val = next_uop_2_fp_val; // @[util.scala:73:26] wire r_uop_newuop_2_fp_single = next_uop_2_fp_single; // @[util.scala:73:26] wire r_uop_newuop_2_xcpt_pf_if = next_uop_2_xcpt_pf_if; // @[util.scala:73:26] wire r_uop_newuop_2_xcpt_ae_if = next_uop_2_xcpt_ae_if; // @[util.scala:73:26] wire r_uop_newuop_2_xcpt_ma_if = next_uop_2_xcpt_ma_if; // @[util.scala:73:26] wire r_uop_newuop_2_bp_debug_if = next_uop_2_bp_debug_if; // @[util.scala:73:26] wire r_uop_newuop_2_bp_xcpt_if = next_uop_2_bp_xcpt_if; // @[util.scala:73:26] wire [1:0] r_uop_newuop_2_debug_fsrc = next_uop_2_debug_fsrc; // @[util.scala:73:26] wire [1:0] r_uop_newuop_2_debug_tsrc = next_uop_2_debug_tsrc; // @[util.scala:73:26] wire [15:0] next_uop_2_br_mask; // @[rename-stage.scala:123:24] wire _r_valid_T_4 = ~io_dis_fire_2_0; // @[rename-stage.scala:133:29, :356:7] wire _r_valid_T_5 = r_valid_2 & _r_valid_T_4; // @[rename-stage.scala:121:27, :133:{26,29}] assign next_uop_2_uopc = _GEN ? r_uop_2_uopc : ren1_uops_2_uopc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_inst = _GEN ? r_uop_2_inst : ren1_uops_2_inst; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_debug_inst = _GEN ? r_uop_2_debug_inst : ren1_uops_2_debug_inst; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_is_rvc = _GEN ? r_uop_2_is_rvc : ren1_uops_2_is_rvc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_debug_pc = _GEN ? r_uop_2_debug_pc : ren1_uops_2_debug_pc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_iq_type = _GEN ? r_uop_2_iq_type : ren1_uops_2_iq_type; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_fu_code = _GEN ? r_uop_2_fu_code : ren1_uops_2_fu_code; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_ctrl_br_type = _GEN ? r_uop_2_ctrl_br_type : 4'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_ctrl_op1_sel = _GEN ? r_uop_2_ctrl_op1_sel : 2'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_ctrl_op2_sel = _GEN ? r_uop_2_ctrl_op2_sel : 3'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_ctrl_imm_sel = _GEN ? r_uop_2_ctrl_imm_sel : 3'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_ctrl_op_fcn = _GEN ? r_uop_2_ctrl_op_fcn : 5'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_ctrl_fcn_dw = _GEN & r_uop_2_ctrl_fcn_dw; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_ctrl_csr_cmd = _GEN ? r_uop_2_ctrl_csr_cmd : 3'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_ctrl_is_load = _GEN & r_uop_2_ctrl_is_load; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_ctrl_is_sta = _GEN & r_uop_2_ctrl_is_sta; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_ctrl_is_std = _GEN & r_uop_2_ctrl_is_std; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_iw_state = _GEN ? 
r_uop_2_iw_state : 2'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_iw_p1_poisoned = _GEN & r_uop_2_iw_p1_poisoned; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_iw_p2_poisoned = _GEN & r_uop_2_iw_p2_poisoned; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_is_br = _GEN ? r_uop_2_is_br : ren1_uops_2_is_br; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_is_jalr = _GEN ? r_uop_2_is_jalr : ren1_uops_2_is_jalr; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_is_jal = _GEN ? r_uop_2_is_jal : ren1_uops_2_is_jal; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_is_sfb = _GEN ? r_uop_2_is_sfb : ren1_uops_2_is_sfb; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_br_mask = _GEN ? r_uop_2_br_mask : ren1_uops_2_br_mask; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_br_tag = _GEN ? r_uop_2_br_tag : ren1_uops_2_br_tag; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_ftq_idx = _GEN ? r_uop_2_ftq_idx : ren1_uops_2_ftq_idx; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_edge_inst = _GEN ? r_uop_2_edge_inst : ren1_uops_2_edge_inst; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_pc_lob = _GEN ? r_uop_2_pc_lob : ren1_uops_2_pc_lob; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_taken = _GEN ? r_uop_2_taken : ren1_uops_2_taken; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_imm_packed = _GEN ? r_uop_2_imm_packed : ren1_uops_2_imm_packed; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_csr_addr = _GEN ? r_uop_2_csr_addr : 12'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_rob_idx = _GEN ? r_uop_2_rob_idx : 7'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_ldq_idx = _GEN ? r_uop_2_ldq_idx : 5'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_stq_idx = _GEN ? r_uop_2_stq_idx : 5'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_rxq_idx = _GEN ? r_uop_2_rxq_idx : 2'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_pdst = _GEN ? r_uop_2_pdst : 7'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_prs1 = _GEN ? r_uop_2_prs1 : 7'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_prs2 = _GEN ? r_uop_2_prs2 : 7'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_prs3 = _GEN ? r_uop_2_prs3 : 7'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_ppred = _GEN ? 
r_uop_2_ppred : 5'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_prs1_busy = _GEN & r_uop_2_prs1_busy; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_prs2_busy = _GEN & r_uop_2_prs2_busy; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_prs3_busy = _GEN & r_uop_2_prs3_busy; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_ppred_busy = _GEN & r_uop_2_ppred_busy; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_stale_pdst = _GEN ? r_uop_2_stale_pdst : 7'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_exception = _GEN ? r_uop_2_exception : ren1_uops_2_exception; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_exc_cause = _GEN ? r_uop_2_exc_cause : ren1_uops_2_exc_cause; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_bypassable = _GEN ? r_uop_2_bypassable : ren1_uops_2_bypassable; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_mem_cmd = _GEN ? r_uop_2_mem_cmd : ren1_uops_2_mem_cmd; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_mem_size = _GEN ? r_uop_2_mem_size : ren1_uops_2_mem_size; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_mem_signed = _GEN ? r_uop_2_mem_signed : ren1_uops_2_mem_signed; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_is_fence = _GEN ? r_uop_2_is_fence : ren1_uops_2_is_fence; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_is_fencei = _GEN ? r_uop_2_is_fencei : ren1_uops_2_is_fencei; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_is_amo = _GEN ? r_uop_2_is_amo : ren1_uops_2_is_amo; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_uses_ldq = _GEN ? r_uop_2_uses_ldq : ren1_uops_2_uses_ldq; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_uses_stq = _GEN ? r_uop_2_uses_stq : ren1_uops_2_uses_stq; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_is_sys_pc2epc = _GEN ? r_uop_2_is_sys_pc2epc : ren1_uops_2_is_sys_pc2epc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_is_unique = _GEN ? r_uop_2_is_unique : ren1_uops_2_is_unique; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_flush_on_commit = _GEN ? r_uop_2_flush_on_commit : ren1_uops_2_flush_on_commit; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_ldst_is_rs1 = _GEN & r_uop_2_ldst_is_rs1; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_ldst = _GEN ? r_uop_2_ldst : ren1_uops_2_ldst; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_lrs1 = _GEN ? r_uop_2_lrs1 : ren1_uops_2_lrs1; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_lrs2 = _GEN ? r_uop_2_lrs2 : ren1_uops_2_lrs2; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_lrs3 = _GEN ? 
r_uop_2_lrs3 : ren1_uops_2_lrs3; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_ldst_val = _GEN ? r_uop_2_ldst_val : ren1_uops_2_ldst_val; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_dst_rtype = _GEN ? r_uop_2_dst_rtype : ren1_uops_2_dst_rtype; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_lrs1_rtype = _GEN ? r_uop_2_lrs1_rtype : ren1_uops_2_lrs1_rtype; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_lrs2_rtype = _GEN ? r_uop_2_lrs2_rtype : ren1_uops_2_lrs2_rtype; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_frs3_en = _GEN ? r_uop_2_frs3_en : ren1_uops_2_frs3_en; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_fp_val = _GEN ? r_uop_2_fp_val : ren1_uops_2_fp_val; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_fp_single = _GEN ? r_uop_2_fp_single : ren1_uops_2_fp_single; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_xcpt_pf_if = _GEN ? r_uop_2_xcpt_pf_if : ren1_uops_2_xcpt_pf_if; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_xcpt_ae_if = _GEN ? r_uop_2_xcpt_ae_if : ren1_uops_2_xcpt_ae_if; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_xcpt_ma_if = _GEN & r_uop_2_xcpt_ma_if; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_bp_debug_if = _GEN ? r_uop_2_bp_debug_if : ren1_uops_2_bp_debug_if; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_bp_xcpt_if = _GEN ? r_uop_2_bp_xcpt_if : ren1_uops_2_bp_xcpt_if; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_debug_fsrc = _GEN ? r_uop_2_debug_fsrc : ren1_uops_2_debug_fsrc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_2_debug_tsrc = _GEN ? 
r_uop_2_debug_tsrc : 2'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] wire [15:0] _r_uop_newuop_br_mask_T_5; // @[util.scala:74:35] wire [15:0] r_uop_newuop_2_br_mask; // @[util.scala:73:26] wire [15:0] _r_uop_newuop_br_mask_T_4 = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:74:37] assign _r_uop_newuop_br_mask_T_5 = next_uop_2_br_mask & _r_uop_newuop_br_mask_T_4; // @[util.scala:74:{35,37}] assign r_uop_newuop_2_br_mask = _r_uop_newuop_br_mask_T_5; // @[util.scala:73:26, :74:35] reg busy_table_0; // @[rename-stage.scala:368:27] wire _io_ren2_uops_0_ppred_busy_T = busy_table_0; // @[rename-stage.scala:368:27, :390:63] wire _io_ren2_uops_1_ppred_busy_T = busy_table_0; // @[rename-stage.scala:368:27, :390:63] wire _io_ren2_uops_2_ppred_busy_T = busy_table_0; // @[rename-stage.scala:368:27, :390:63] reg busy_table_1; // @[rename-stage.scala:368:27] reg busy_table_2; // @[rename-stage.scala:368:27] reg busy_table_3; // @[rename-stage.scala:368:27] reg busy_table_4; // @[rename-stage.scala:368:27] reg busy_table_5; // @[rename-stage.scala:368:27] reg busy_table_6; // @[rename-stage.scala:368:27] reg busy_table_7; // @[rename-stage.scala:368:27] reg busy_table_8; // @[rename-stage.scala:368:27] reg busy_table_9; // @[rename-stage.scala:368:27] reg busy_table_10; // @[rename-stage.scala:368:27] reg busy_table_11; // @[rename-stage.scala:368:27] reg busy_table_12; // @[rename-stage.scala:368:27] reg busy_table_13; // @[rename-stage.scala:368:27] reg busy_table_14; // @[rename-stage.scala:368:27] reg busy_table_15; // @[rename-stage.scala:368:27] reg busy_table_16; // @[rename-stage.scala:368:27] reg busy_table_17; // @[rename-stage.scala:368:27] reg busy_table_18; // @[rename-stage.scala:368:27] reg busy_table_19; // @[rename-stage.scala:368:27] reg busy_table_20; // @[rename-stage.scala:368:27] reg busy_table_21; // @[rename-stage.scala:368:27] reg busy_table_22; // @[rename-stage.scala:368:27] reg busy_table_23; // @[rename-stage.scala:368:27] reg busy_table_24; // @[rename-stage.scala:368:27] reg busy_table_25; // @[rename-stage.scala:368:27] reg busy_table_26; // @[rename-stage.scala:368:27] reg busy_table_27; // @[rename-stage.scala:368:27] reg busy_table_28; // @[rename-stage.scala:368:27] reg busy_table_29; // @[rename-stage.scala:368:27] reg busy_table_30; // @[rename-stage.scala:368:27] reg busy_table_31; // @[rename-stage.scala:368:27] wire _is_sfb_br_T = ren2_uops_0_is_br & ren2_uops_0_is_sfb; // @[rename-stage.scala:108:29] wire _is_sfb_shadow_T = ~ren2_uops_0_is_br; // @[rename-stage.scala:108:29] wire _is_sfb_shadow_T_1 = _is_sfb_shadow_T & ren2_uops_0_is_sfb; // @[rename-stage.scala:108:29] wire _io_ren2_uops_0_ppred_busy_T_2 = _io_ren2_uops_0_ppred_busy_T; // @[rename-stage.scala:390:{63,89}] wire _is_sfb_br_T_2 = ren2_uops_1_is_br & ren2_uops_1_is_sfb; // @[rename-stage.scala:108:29] wire _is_sfb_shadow_T_3 = ~ren2_uops_1_is_br; // @[rename-stage.scala:108:29] wire _is_sfb_shadow_T_4 = _is_sfb_shadow_T_3 & ren2_uops_1_is_sfb; // @[rename-stage.scala:108:29] wire _io_ren2_uops_1_ppred_busy_T_2 = _io_ren2_uops_1_ppred_busy_T; // @[rename-stage.scala:390:{63,89}] wire _is_sfb_br_T_4 = ren2_uops_2_is_br & ren2_uops_2_is_sfb; // @[rename-stage.scala:108:29] wire _is_sfb_shadow_T_6 = ~ren2_uops_2_is_br; // @[rename-stage.scala:108:29] wire _is_sfb_shadow_T_7 = _is_sfb_shadow_T_6 & ren2_uops_2_is_sfb; // @[rename-stage.scala:108:29] wire _io_ren2_uops_2_ppred_busy_T_2 = _io_ren2_uops_2_ppred_busy_T; // @[rename-stage.scala:390:{63,89}] wire [1:0] lo_lo_lo_lo = 
{busy_table_1, busy_table_0}; // @[rename-stage.scala:368:27, :402:30] wire [1:0] lo_lo_lo_hi = {busy_table_3, busy_table_2}; // @[rename-stage.scala:368:27, :402:30] wire [3:0] lo_lo_lo = {lo_lo_lo_hi, lo_lo_lo_lo}; // @[rename-stage.scala:402:30] wire [1:0] lo_lo_hi_lo = {busy_table_5, busy_table_4}; // @[rename-stage.scala:368:27, :402:30] wire [1:0] lo_lo_hi_hi = {busy_table_7, busy_table_6}; // @[rename-stage.scala:368:27, :402:30] wire [3:0] lo_lo_hi = {lo_lo_hi_hi, lo_lo_hi_lo}; // @[rename-stage.scala:402:30] wire [7:0] lo_lo = {lo_lo_hi, lo_lo_lo}; // @[rename-stage.scala:402:30] wire [1:0] lo_hi_lo_lo = {busy_table_9, busy_table_8}; // @[rename-stage.scala:368:27, :402:30] wire [1:0] lo_hi_lo_hi = {busy_table_11, busy_table_10}; // @[rename-stage.scala:368:27, :402:30] wire [3:0] lo_hi_lo = {lo_hi_lo_hi, lo_hi_lo_lo}; // @[rename-stage.scala:402:30] wire [1:0] lo_hi_hi_lo = {busy_table_13, busy_table_12}; // @[rename-stage.scala:368:27, :402:30] wire [1:0] lo_hi_hi_hi = {busy_table_15, busy_table_14}; // @[rename-stage.scala:368:27, :402:30] wire [3:0] lo_hi_hi = {lo_hi_hi_hi, lo_hi_hi_lo}; // @[rename-stage.scala:402:30] wire [7:0] lo_hi = {lo_hi_hi, lo_hi_lo}; // @[rename-stage.scala:402:30] wire [15:0] lo = {lo_hi, lo_lo}; // @[rename-stage.scala:402:30] wire [1:0] hi_lo_lo_lo = {busy_table_17, busy_table_16}; // @[rename-stage.scala:368:27, :402:30] wire [1:0] hi_lo_lo_hi = {busy_table_19, busy_table_18}; // @[rename-stage.scala:368:27, :402:30] wire [3:0] hi_lo_lo = {hi_lo_lo_hi, hi_lo_lo_lo}; // @[rename-stage.scala:402:30] wire [1:0] hi_lo_hi_lo = {busy_table_21, busy_table_20}; // @[rename-stage.scala:368:27, :402:30] wire [1:0] hi_lo_hi_hi = {busy_table_23, busy_table_22}; // @[rename-stage.scala:368:27, :402:30] wire [3:0] hi_lo_hi = {hi_lo_hi_hi, hi_lo_hi_lo}; // @[rename-stage.scala:402:30] wire [7:0] hi_lo = {hi_lo_hi, hi_lo_lo}; // @[rename-stage.scala:402:30] wire [1:0] hi_hi_lo_lo = {busy_table_25, busy_table_24}; // @[rename-stage.scala:368:27, :402:30] wire [1:0] hi_hi_lo_hi = {busy_table_27, busy_table_26}; // @[rename-stage.scala:368:27, :402:30] wire [3:0] hi_hi_lo = {hi_hi_lo_hi, hi_hi_lo_lo}; // @[rename-stage.scala:402:30] wire [1:0] hi_hi_hi_lo = {busy_table_29, busy_table_28}; // @[rename-stage.scala:368:27, :402:30] wire [1:0] hi_hi_hi_hi = {busy_table_31, busy_table_30}; // @[rename-stage.scala:368:27, :402:30] wire [3:0] hi_hi_hi = {hi_hi_hi_hi, hi_hi_hi_lo}; // @[rename-stage.scala:402:30] wire [7:0] hi_hi = {hi_hi_hi, hi_hi_lo}; // @[rename-stage.scala:402:30] wire [15:0] hi = {hi_hi, hi_lo}; // @[rename-stage.scala:402:30] always @(posedge clock) begin // @[rename-stage.scala:356:7] if (reset) begin // @[rename-stage.scala:356:7] r_valid <= 1'h0; // @[rename-stage.scala:121:27] r_valid_1 <= 1'h0; // @[rename-stage.scala:121:27] r_valid_2 <= 1'h0; // @[rename-stage.scala:121:27] busy_table_0 <= 1'h0; // @[rename-stage.scala:368:27] busy_table_1 <= 1'h0; // @[rename-stage.scala:368:27] busy_table_2 <= 1'h0; // @[rename-stage.scala:368:27] busy_table_3 <= 1'h0; // @[rename-stage.scala:368:27] busy_table_4 <= 1'h0; // @[rename-stage.scala:368:27] busy_table_5 <= 1'h0; // @[rename-stage.scala:368:27] busy_table_6 <= 1'h0; // @[rename-stage.scala:368:27] busy_table_7 <= 1'h0; // @[rename-stage.scala:368:27] busy_table_8 <= 1'h0; // @[rename-stage.scala:368:27] busy_table_9 <= 1'h0; // @[rename-stage.scala:368:27] busy_table_10 <= 1'h0; // @[rename-stage.scala:368:27] busy_table_11 <= 1'h0; // @[rename-stage.scala:368:27] busy_table_12 <= 1'h0; // 
@[rename-stage.scala:368:27] busy_table_13 <= 1'h0; // @[rename-stage.scala:368:27] busy_table_14 <= 1'h0; // @[rename-stage.scala:368:27] busy_table_15 <= 1'h0; // @[rename-stage.scala:368:27] busy_table_16 <= 1'h0; // @[rename-stage.scala:368:27] busy_table_17 <= 1'h0; // @[rename-stage.scala:368:27] busy_table_18 <= 1'h0; // @[rename-stage.scala:368:27] busy_table_19 <= 1'h0; // @[rename-stage.scala:368:27] busy_table_20 <= 1'h0; // @[rename-stage.scala:368:27] busy_table_21 <= 1'h0; // @[rename-stage.scala:368:27] busy_table_22 <= 1'h0; // @[rename-stage.scala:368:27] busy_table_23 <= 1'h0; // @[rename-stage.scala:368:27] busy_table_24 <= 1'h0; // @[rename-stage.scala:368:27] busy_table_25 <= 1'h0; // @[rename-stage.scala:368:27] busy_table_26 <= 1'h0; // @[rename-stage.scala:368:27] busy_table_27 <= 1'h0; // @[rename-stage.scala:368:27] busy_table_28 <= 1'h0; // @[rename-stage.scala:368:27] busy_table_29 <= 1'h0; // @[rename-stage.scala:368:27] busy_table_30 <= 1'h0; // @[rename-stage.scala:368:27] busy_table_31 <= 1'h0; // @[rename-stage.scala:368:27] end else begin // @[rename-stage.scala:356:7] r_valid <= ~io_kill_0 & (io_dis_ready_0 ? ren1_fire_0 : _r_valid_T_1); // @[rename-stage.scala:100:29, :121:27, :127:20, :128:15, :129:30, :130:15, :133:{15,26}, :356:7] r_valid_1 <= ~io_kill_0 & (io_dis_ready_0 ? ren1_fire_1 : _r_valid_T_3); // @[rename-stage.scala:100:29, :121:27, :127:20, :128:15, :129:30, :130:15, :133:{15,26}, :356:7] r_valid_2 <= ~io_kill_0 & (io_dis_ready_0 ? ren1_fire_2 : _r_valid_T_5); // @[rename-stage.scala:100:29, :121:27, :127:20, :128:15, :129:30, :130:15, :133:{15,26}, :356:7] busy_table_0 <= lo[0]; // @[rename-stage.scala:368:27, :402:{30,55,73}] busy_table_1 <= lo[1]; // @[rename-stage.scala:368:27, :402:{30,55,73}] busy_table_2 <= lo[2]; // @[rename-stage.scala:368:27, :402:{30,55,73}] busy_table_3 <= lo[3]; // @[rename-stage.scala:368:27, :402:{30,55,73}] busy_table_4 <= lo[4]; // @[rename-stage.scala:368:27, :402:{30,55,73}] busy_table_5 <= lo[5]; // @[rename-stage.scala:368:27, :402:{30,55,73}] busy_table_6 <= lo[6]; // @[rename-stage.scala:368:27, :402:{30,55,73}] busy_table_7 <= lo[7]; // @[rename-stage.scala:368:27, :402:{30,55,73}] busy_table_8 <= lo[8]; // @[rename-stage.scala:368:27, :402:{30,55,73}] busy_table_9 <= lo[9]; // @[rename-stage.scala:368:27, :402:{30,55,73}] busy_table_10 <= lo[10]; // @[rename-stage.scala:368:27, :402:{30,55,73}] busy_table_11 <= lo[11]; // @[rename-stage.scala:368:27, :402:{30,55,73}] busy_table_12 <= lo[12]; // @[rename-stage.scala:368:27, :402:{30,55,73}] busy_table_13 <= lo[13]; // @[rename-stage.scala:368:27, :402:{30,55,73}] busy_table_14 <= lo[14]; // @[rename-stage.scala:368:27, :402:{30,55,73}] busy_table_15 <= lo[15]; // @[rename-stage.scala:368:27, :402:{30,55,73}] busy_table_16 <= hi[0]; // @[rename-stage.scala:368:27, :402:{30,55,73}] busy_table_17 <= hi[1]; // @[rename-stage.scala:368:27, :402:{30,55,73}] busy_table_18 <= hi[2]; // @[rename-stage.scala:368:27, :402:{30,55,73}] busy_table_19 <= hi[3]; // @[rename-stage.scala:368:27, :402:{30,55,73}] busy_table_20 <= hi[4]; // @[rename-stage.scala:368:27, :402:{30,55,73}] busy_table_21 <= hi[5]; // @[rename-stage.scala:368:27, :402:{30,55,73}] busy_table_22 <= hi[6]; // @[rename-stage.scala:368:27, :402:{30,55,73}] busy_table_23 <= hi[7]; // @[rename-stage.scala:368:27, :402:{30,55,73}] busy_table_24 <= hi[8]; // @[rename-stage.scala:368:27, :402:{30,55,73}] busy_table_25 <= hi[9]; // @[rename-stage.scala:368:27, :402:{30,55,73}] busy_table_26 <= hi[10]; 
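// Busy-table summary (inferred from the surrounding logic, not part of the generated output):
// busy_table_0..busy_table_31 (rename-stage.scala:368:27) appear to form a 32-entry predicate busy
// table. The registers reset to 0 and are rewritten every cycle from the packed {hi, lo} vector
// assembled at rename-stage.scala:402, while the ren2 ppred_busy terms (rename-stage.scala:390)
// are derived combinationally from busy_table entries.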
// @[rename-stage.scala:368:27, :402:{30,55,73}] busy_table_27 <= hi[11]; // @[rename-stage.scala:368:27, :402:{30,55,73}] busy_table_28 <= hi[12]; // @[rename-stage.scala:368:27, :402:{30,55,73}] busy_table_29 <= hi[13]; // @[rename-stage.scala:368:27, :402:{30,55,73}] busy_table_30 <= hi[14]; // @[rename-stage.scala:368:27, :402:{30,55,73}] busy_table_31 <= hi[15]; // @[rename-stage.scala:368:27, :402:{30,55,73}] end r_uop_uopc <= r_uop_newuop_uopc; // @[util.scala:73:26] r_uop_inst <= r_uop_newuop_inst; // @[util.scala:73:26] r_uop_debug_inst <= r_uop_newuop_debug_inst; // @[util.scala:73:26] r_uop_is_rvc <= r_uop_newuop_is_rvc; // @[util.scala:73:26] r_uop_debug_pc <= r_uop_newuop_debug_pc; // @[util.scala:73:26] r_uop_iq_type <= r_uop_newuop_iq_type; // @[util.scala:73:26] r_uop_fu_code <= r_uop_newuop_fu_code; // @[util.scala:73:26] r_uop_ctrl_br_type <= r_uop_newuop_ctrl_br_type; // @[util.scala:73:26] r_uop_ctrl_op1_sel <= r_uop_newuop_ctrl_op1_sel; // @[util.scala:73:26] r_uop_ctrl_op2_sel <= r_uop_newuop_ctrl_op2_sel; // @[util.scala:73:26] r_uop_ctrl_imm_sel <= r_uop_newuop_ctrl_imm_sel; // @[util.scala:73:26] r_uop_ctrl_op_fcn <= r_uop_newuop_ctrl_op_fcn; // @[util.scala:73:26] r_uop_ctrl_fcn_dw <= r_uop_newuop_ctrl_fcn_dw; // @[util.scala:73:26] r_uop_ctrl_csr_cmd <= r_uop_newuop_ctrl_csr_cmd; // @[util.scala:73:26] r_uop_ctrl_is_load <= r_uop_newuop_ctrl_is_load; // @[util.scala:73:26] r_uop_ctrl_is_sta <= r_uop_newuop_ctrl_is_sta; // @[util.scala:73:26] r_uop_ctrl_is_std <= r_uop_newuop_ctrl_is_std; // @[util.scala:73:26] r_uop_iw_state <= r_uop_newuop_iw_state; // @[util.scala:73:26] r_uop_iw_p1_poisoned <= r_uop_newuop_iw_p1_poisoned; // @[util.scala:73:26] r_uop_iw_p2_poisoned <= r_uop_newuop_iw_p2_poisoned; // @[util.scala:73:26] r_uop_is_br <= r_uop_newuop_is_br; // @[util.scala:73:26] r_uop_is_jalr <= r_uop_newuop_is_jalr; // @[util.scala:73:26] r_uop_is_jal <= r_uop_newuop_is_jal; // @[util.scala:73:26] r_uop_is_sfb <= r_uop_newuop_is_sfb; // @[util.scala:73:26] r_uop_br_mask <= r_uop_newuop_br_mask; // @[util.scala:73:26] r_uop_br_tag <= r_uop_newuop_br_tag; // @[util.scala:73:26] r_uop_ftq_idx <= r_uop_newuop_ftq_idx; // @[util.scala:73:26] r_uop_edge_inst <= r_uop_newuop_edge_inst; // @[util.scala:73:26] r_uop_pc_lob <= r_uop_newuop_pc_lob; // @[util.scala:73:26] r_uop_taken <= r_uop_newuop_taken; // @[util.scala:73:26] r_uop_imm_packed <= r_uop_newuop_imm_packed; // @[util.scala:73:26] r_uop_csr_addr <= r_uop_newuop_csr_addr; // @[util.scala:73:26] r_uop_rob_idx <= r_uop_newuop_rob_idx; // @[util.scala:73:26] r_uop_ldq_idx <= r_uop_newuop_ldq_idx; // @[util.scala:73:26] r_uop_stq_idx <= r_uop_newuop_stq_idx; // @[util.scala:73:26] r_uop_rxq_idx <= r_uop_newuop_rxq_idx; // @[util.scala:73:26] r_uop_pdst <= r_uop_newuop_pdst; // @[util.scala:73:26] r_uop_prs1 <= r_uop_newuop_prs1; // @[util.scala:73:26] r_uop_prs2 <= r_uop_newuop_prs2; // @[util.scala:73:26] r_uop_prs3 <= r_uop_newuop_prs3; // @[util.scala:73:26] r_uop_ppred <= r_uop_newuop_ppred; // @[util.scala:73:26] r_uop_prs1_busy <= r_uop_newuop_prs1_busy; // @[util.scala:73:26] r_uop_prs2_busy <= r_uop_newuop_prs2_busy; // @[util.scala:73:26] r_uop_prs3_busy <= r_uop_newuop_prs3_busy; // @[util.scala:73:26] r_uop_ppred_busy <= r_uop_newuop_ppred_busy; // @[util.scala:73:26] r_uop_stale_pdst <= r_uop_newuop_stale_pdst; // @[util.scala:73:26] r_uop_exception <= r_uop_newuop_exception; // @[util.scala:73:26] r_uop_exc_cause <= r_uop_newuop_exc_cause; // @[util.scala:73:26] r_uop_bypassable <= 
r_uop_newuop_bypassable; // @[util.scala:73:26] r_uop_mem_cmd <= r_uop_newuop_mem_cmd; // @[util.scala:73:26] r_uop_mem_size <= r_uop_newuop_mem_size; // @[util.scala:73:26] r_uop_mem_signed <= r_uop_newuop_mem_signed; // @[util.scala:73:26] r_uop_is_fence <= r_uop_newuop_is_fence; // @[util.scala:73:26] r_uop_is_fencei <= r_uop_newuop_is_fencei; // @[util.scala:73:26] r_uop_is_amo <= r_uop_newuop_is_amo; // @[util.scala:73:26] r_uop_uses_ldq <= r_uop_newuop_uses_ldq; // @[util.scala:73:26] r_uop_uses_stq <= r_uop_newuop_uses_stq; // @[util.scala:73:26] r_uop_is_sys_pc2epc <= r_uop_newuop_is_sys_pc2epc; // @[util.scala:73:26] r_uop_is_unique <= r_uop_newuop_is_unique; // @[util.scala:73:26] r_uop_flush_on_commit <= r_uop_newuop_flush_on_commit; // @[util.scala:73:26] r_uop_ldst_is_rs1 <= r_uop_newuop_ldst_is_rs1; // @[util.scala:73:26] r_uop_ldst <= r_uop_newuop_ldst; // @[util.scala:73:26] r_uop_lrs1 <= r_uop_newuop_lrs1; // @[util.scala:73:26] r_uop_lrs2 <= r_uop_newuop_lrs2; // @[util.scala:73:26] r_uop_lrs3 <= r_uop_newuop_lrs3; // @[util.scala:73:26] r_uop_ldst_val <= r_uop_newuop_ldst_val; // @[util.scala:73:26] r_uop_dst_rtype <= r_uop_newuop_dst_rtype; // @[util.scala:73:26] r_uop_lrs1_rtype <= r_uop_newuop_lrs1_rtype; // @[util.scala:73:26] r_uop_lrs2_rtype <= r_uop_newuop_lrs2_rtype; // @[util.scala:73:26] r_uop_frs3_en <= r_uop_newuop_frs3_en; // @[util.scala:73:26] r_uop_fp_val <= r_uop_newuop_fp_val; // @[util.scala:73:26] r_uop_fp_single <= r_uop_newuop_fp_single; // @[util.scala:73:26] r_uop_xcpt_pf_if <= r_uop_newuop_xcpt_pf_if; // @[util.scala:73:26] r_uop_xcpt_ae_if <= r_uop_newuop_xcpt_ae_if; // @[util.scala:73:26] r_uop_xcpt_ma_if <= r_uop_newuop_xcpt_ma_if; // @[util.scala:73:26] r_uop_bp_debug_if <= r_uop_newuop_bp_debug_if; // @[util.scala:73:26] r_uop_bp_xcpt_if <= r_uop_newuop_bp_xcpt_if; // @[util.scala:73:26] r_uop_debug_fsrc <= r_uop_newuop_debug_fsrc; // @[util.scala:73:26] r_uop_debug_tsrc <= r_uop_newuop_debug_tsrc; // @[util.scala:73:26] r_uop_1_uopc <= r_uop_newuop_1_uopc; // @[util.scala:73:26] r_uop_1_inst <= r_uop_newuop_1_inst; // @[util.scala:73:26] r_uop_1_debug_inst <= r_uop_newuop_1_debug_inst; // @[util.scala:73:26] r_uop_1_is_rvc <= r_uop_newuop_1_is_rvc; // @[util.scala:73:26] r_uop_1_debug_pc <= r_uop_newuop_1_debug_pc; // @[util.scala:73:26] r_uop_1_iq_type <= r_uop_newuop_1_iq_type; // @[util.scala:73:26] r_uop_1_fu_code <= r_uop_newuop_1_fu_code; // @[util.scala:73:26] r_uop_1_ctrl_br_type <= r_uop_newuop_1_ctrl_br_type; // @[util.scala:73:26] r_uop_1_ctrl_op1_sel <= r_uop_newuop_1_ctrl_op1_sel; // @[util.scala:73:26] r_uop_1_ctrl_op2_sel <= r_uop_newuop_1_ctrl_op2_sel; // @[util.scala:73:26] r_uop_1_ctrl_imm_sel <= r_uop_newuop_1_ctrl_imm_sel; // @[util.scala:73:26] r_uop_1_ctrl_op_fcn <= r_uop_newuop_1_ctrl_op_fcn; // @[util.scala:73:26] r_uop_1_ctrl_fcn_dw <= r_uop_newuop_1_ctrl_fcn_dw; // @[util.scala:73:26] r_uop_1_ctrl_csr_cmd <= r_uop_newuop_1_ctrl_csr_cmd; // @[util.scala:73:26] r_uop_1_ctrl_is_load <= r_uop_newuop_1_ctrl_is_load; // @[util.scala:73:26] r_uop_1_ctrl_is_sta <= r_uop_newuop_1_ctrl_is_sta; // @[util.scala:73:26] r_uop_1_ctrl_is_std <= r_uop_newuop_1_ctrl_is_std; // @[util.scala:73:26] r_uop_1_iw_state <= r_uop_newuop_1_iw_state; // @[util.scala:73:26] r_uop_1_iw_p1_poisoned <= r_uop_newuop_1_iw_p1_poisoned; // @[util.scala:73:26] r_uop_1_iw_p2_poisoned <= r_uop_newuop_1_iw_p2_poisoned; // @[util.scala:73:26] r_uop_1_is_br <= r_uop_newuop_1_is_br; // @[util.scala:73:26] r_uop_1_is_jalr <= r_uop_newuop_1_is_jalr; // 
@[util.scala:73:26] r_uop_1_is_jal <= r_uop_newuop_1_is_jal; // @[util.scala:73:26] r_uop_1_is_sfb <= r_uop_newuop_1_is_sfb; // @[util.scala:73:26] r_uop_1_br_mask <= r_uop_newuop_1_br_mask; // @[util.scala:73:26] r_uop_1_br_tag <= r_uop_newuop_1_br_tag; // @[util.scala:73:26] r_uop_1_ftq_idx <= r_uop_newuop_1_ftq_idx; // @[util.scala:73:26] r_uop_1_edge_inst <= r_uop_newuop_1_edge_inst; // @[util.scala:73:26] r_uop_1_pc_lob <= r_uop_newuop_1_pc_lob; // @[util.scala:73:26] r_uop_1_taken <= r_uop_newuop_1_taken; // @[util.scala:73:26] r_uop_1_imm_packed <= r_uop_newuop_1_imm_packed; // @[util.scala:73:26] r_uop_1_csr_addr <= r_uop_newuop_1_csr_addr; // @[util.scala:73:26] r_uop_1_rob_idx <= r_uop_newuop_1_rob_idx; // @[util.scala:73:26] r_uop_1_ldq_idx <= r_uop_newuop_1_ldq_idx; // @[util.scala:73:26] r_uop_1_stq_idx <= r_uop_newuop_1_stq_idx; // @[util.scala:73:26] r_uop_1_rxq_idx <= r_uop_newuop_1_rxq_idx; // @[util.scala:73:26] r_uop_1_pdst <= r_uop_newuop_1_pdst; // @[util.scala:73:26] r_uop_1_prs1 <= r_uop_newuop_1_prs1; // @[util.scala:73:26] r_uop_1_prs2 <= r_uop_newuop_1_prs2; // @[util.scala:73:26] r_uop_1_prs3 <= r_uop_newuop_1_prs3; // @[util.scala:73:26] r_uop_1_ppred <= r_uop_newuop_1_ppred; // @[util.scala:73:26] r_uop_1_prs1_busy <= r_uop_newuop_1_prs1_busy; // @[util.scala:73:26] r_uop_1_prs2_busy <= r_uop_newuop_1_prs2_busy; // @[util.scala:73:26] r_uop_1_prs3_busy <= r_uop_newuop_1_prs3_busy; // @[util.scala:73:26] r_uop_1_ppred_busy <= r_uop_newuop_1_ppred_busy; // @[util.scala:73:26] r_uop_1_stale_pdst <= r_uop_newuop_1_stale_pdst; // @[util.scala:73:26] r_uop_1_exception <= r_uop_newuop_1_exception; // @[util.scala:73:26] r_uop_1_exc_cause <= r_uop_newuop_1_exc_cause; // @[util.scala:73:26] r_uop_1_bypassable <= r_uop_newuop_1_bypassable; // @[util.scala:73:26] r_uop_1_mem_cmd <= r_uop_newuop_1_mem_cmd; // @[util.scala:73:26] r_uop_1_mem_size <= r_uop_newuop_1_mem_size; // @[util.scala:73:26] r_uop_1_mem_signed <= r_uop_newuop_1_mem_signed; // @[util.scala:73:26] r_uop_1_is_fence <= r_uop_newuop_1_is_fence; // @[util.scala:73:26] r_uop_1_is_fencei <= r_uop_newuop_1_is_fencei; // @[util.scala:73:26] r_uop_1_is_amo <= r_uop_newuop_1_is_amo; // @[util.scala:73:26] r_uop_1_uses_ldq <= r_uop_newuop_1_uses_ldq; // @[util.scala:73:26] r_uop_1_uses_stq <= r_uop_newuop_1_uses_stq; // @[util.scala:73:26] r_uop_1_is_sys_pc2epc <= r_uop_newuop_1_is_sys_pc2epc; // @[util.scala:73:26] r_uop_1_is_unique <= r_uop_newuop_1_is_unique; // @[util.scala:73:26] r_uop_1_flush_on_commit <= r_uop_newuop_1_flush_on_commit; // @[util.scala:73:26] r_uop_1_ldst_is_rs1 <= r_uop_newuop_1_ldst_is_rs1; // @[util.scala:73:26] r_uop_1_ldst <= r_uop_newuop_1_ldst; // @[util.scala:73:26] r_uop_1_lrs1 <= r_uop_newuop_1_lrs1; // @[util.scala:73:26] r_uop_1_lrs2 <= r_uop_newuop_1_lrs2; // @[util.scala:73:26] r_uop_1_lrs3 <= r_uop_newuop_1_lrs3; // @[util.scala:73:26] r_uop_1_ldst_val <= r_uop_newuop_1_ldst_val; // @[util.scala:73:26] r_uop_1_dst_rtype <= r_uop_newuop_1_dst_rtype; // @[util.scala:73:26] r_uop_1_lrs1_rtype <= r_uop_newuop_1_lrs1_rtype; // @[util.scala:73:26] r_uop_1_lrs2_rtype <= r_uop_newuop_1_lrs2_rtype; // @[util.scala:73:26] r_uop_1_frs3_en <= r_uop_newuop_1_frs3_en; // @[util.scala:73:26] r_uop_1_fp_val <= r_uop_newuop_1_fp_val; // @[util.scala:73:26] r_uop_1_fp_single <= r_uop_newuop_1_fp_single; // @[util.scala:73:26] r_uop_1_xcpt_pf_if <= r_uop_newuop_1_xcpt_pf_if; // @[util.scala:73:26] r_uop_1_xcpt_ae_if <= r_uop_newuop_1_xcpt_ae_if; // @[util.scala:73:26] r_uop_1_xcpt_ma_if <= 
r_uop_newuop_1_xcpt_ma_if; // @[util.scala:73:26] r_uop_1_bp_debug_if <= r_uop_newuop_1_bp_debug_if; // @[util.scala:73:26] r_uop_1_bp_xcpt_if <= r_uop_newuop_1_bp_xcpt_if; // @[util.scala:73:26] r_uop_1_debug_fsrc <= r_uop_newuop_1_debug_fsrc; // @[util.scala:73:26] r_uop_1_debug_tsrc <= r_uop_newuop_1_debug_tsrc; // @[util.scala:73:26] r_uop_2_uopc <= r_uop_newuop_2_uopc; // @[util.scala:73:26] r_uop_2_inst <= r_uop_newuop_2_inst; // @[util.scala:73:26] r_uop_2_debug_inst <= r_uop_newuop_2_debug_inst; // @[util.scala:73:26] r_uop_2_is_rvc <= r_uop_newuop_2_is_rvc; // @[util.scala:73:26] r_uop_2_debug_pc <= r_uop_newuop_2_debug_pc; // @[util.scala:73:26] r_uop_2_iq_type <= r_uop_newuop_2_iq_type; // @[util.scala:73:26] r_uop_2_fu_code <= r_uop_newuop_2_fu_code; // @[util.scala:73:26] r_uop_2_ctrl_br_type <= r_uop_newuop_2_ctrl_br_type; // @[util.scala:73:26] r_uop_2_ctrl_op1_sel <= r_uop_newuop_2_ctrl_op1_sel; // @[util.scala:73:26] r_uop_2_ctrl_op2_sel <= r_uop_newuop_2_ctrl_op2_sel; // @[util.scala:73:26] r_uop_2_ctrl_imm_sel <= r_uop_newuop_2_ctrl_imm_sel; // @[util.scala:73:26] r_uop_2_ctrl_op_fcn <= r_uop_newuop_2_ctrl_op_fcn; // @[util.scala:73:26] r_uop_2_ctrl_fcn_dw <= r_uop_newuop_2_ctrl_fcn_dw; // @[util.scala:73:26] r_uop_2_ctrl_csr_cmd <= r_uop_newuop_2_ctrl_csr_cmd; // @[util.scala:73:26] r_uop_2_ctrl_is_load <= r_uop_newuop_2_ctrl_is_load; // @[util.scala:73:26] r_uop_2_ctrl_is_sta <= r_uop_newuop_2_ctrl_is_sta; // @[util.scala:73:26] r_uop_2_ctrl_is_std <= r_uop_newuop_2_ctrl_is_std; // @[util.scala:73:26] r_uop_2_iw_state <= r_uop_newuop_2_iw_state; // @[util.scala:73:26] r_uop_2_iw_p1_poisoned <= r_uop_newuop_2_iw_p1_poisoned; // @[util.scala:73:26] r_uop_2_iw_p2_poisoned <= r_uop_newuop_2_iw_p2_poisoned; // @[util.scala:73:26] r_uop_2_is_br <= r_uop_newuop_2_is_br; // @[util.scala:73:26] r_uop_2_is_jalr <= r_uop_newuop_2_is_jalr; // @[util.scala:73:26] r_uop_2_is_jal <= r_uop_newuop_2_is_jal; // @[util.scala:73:26] r_uop_2_is_sfb <= r_uop_newuop_2_is_sfb; // @[util.scala:73:26] r_uop_2_br_mask <= r_uop_newuop_2_br_mask; // @[util.scala:73:26] r_uop_2_br_tag <= r_uop_newuop_2_br_tag; // @[util.scala:73:26] r_uop_2_ftq_idx <= r_uop_newuop_2_ftq_idx; // @[util.scala:73:26] r_uop_2_edge_inst <= r_uop_newuop_2_edge_inst; // @[util.scala:73:26] r_uop_2_pc_lob <= r_uop_newuop_2_pc_lob; // @[util.scala:73:26] r_uop_2_taken <= r_uop_newuop_2_taken; // @[util.scala:73:26] r_uop_2_imm_packed <= r_uop_newuop_2_imm_packed; // @[util.scala:73:26] r_uop_2_csr_addr <= r_uop_newuop_2_csr_addr; // @[util.scala:73:26] r_uop_2_rob_idx <= r_uop_newuop_2_rob_idx; // @[util.scala:73:26] r_uop_2_ldq_idx <= r_uop_newuop_2_ldq_idx; // @[util.scala:73:26] r_uop_2_stq_idx <= r_uop_newuop_2_stq_idx; // @[util.scala:73:26] r_uop_2_rxq_idx <= r_uop_newuop_2_rxq_idx; // @[util.scala:73:26] r_uop_2_pdst <= r_uop_newuop_2_pdst; // @[util.scala:73:26] r_uop_2_prs1 <= r_uop_newuop_2_prs1; // @[util.scala:73:26] r_uop_2_prs2 <= r_uop_newuop_2_prs2; // @[util.scala:73:26] r_uop_2_prs3 <= r_uop_newuop_2_prs3; // @[util.scala:73:26] r_uop_2_ppred <= r_uop_newuop_2_ppred; // @[util.scala:73:26] r_uop_2_prs1_busy <= r_uop_newuop_2_prs1_busy; // @[util.scala:73:26] r_uop_2_prs2_busy <= r_uop_newuop_2_prs2_busy; // @[util.scala:73:26] r_uop_2_prs3_busy <= r_uop_newuop_2_prs3_busy; // @[util.scala:73:26] r_uop_2_ppred_busy <= r_uop_newuop_2_ppred_busy; // @[util.scala:73:26] r_uop_2_stale_pdst <= r_uop_newuop_2_stale_pdst; // @[util.scala:73:26] r_uop_2_exception <= r_uop_newuop_2_exception; // @[util.scala:73:26] 
r_uop_2_exc_cause <= r_uop_newuop_2_exc_cause; // @[util.scala:73:26] r_uop_2_bypassable <= r_uop_newuop_2_bypassable; // @[util.scala:73:26] r_uop_2_mem_cmd <= r_uop_newuop_2_mem_cmd; // @[util.scala:73:26] r_uop_2_mem_size <= r_uop_newuop_2_mem_size; // @[util.scala:73:26] r_uop_2_mem_signed <= r_uop_newuop_2_mem_signed; // @[util.scala:73:26] r_uop_2_is_fence <= r_uop_newuop_2_is_fence; // @[util.scala:73:26] r_uop_2_is_fencei <= r_uop_newuop_2_is_fencei; // @[util.scala:73:26] r_uop_2_is_amo <= r_uop_newuop_2_is_amo; // @[util.scala:73:26] r_uop_2_uses_ldq <= r_uop_newuop_2_uses_ldq; // @[util.scala:73:26] r_uop_2_uses_stq <= r_uop_newuop_2_uses_stq; // @[util.scala:73:26] r_uop_2_is_sys_pc2epc <= r_uop_newuop_2_is_sys_pc2epc; // @[util.scala:73:26] r_uop_2_is_unique <= r_uop_newuop_2_is_unique; // @[util.scala:73:26] r_uop_2_flush_on_commit <= r_uop_newuop_2_flush_on_commit; // @[util.scala:73:26] r_uop_2_ldst_is_rs1 <= r_uop_newuop_2_ldst_is_rs1; // @[util.scala:73:26] r_uop_2_ldst <= r_uop_newuop_2_ldst; // @[util.scala:73:26] r_uop_2_lrs1 <= r_uop_newuop_2_lrs1; // @[util.scala:73:26] r_uop_2_lrs2 <= r_uop_newuop_2_lrs2; // @[util.scala:73:26] r_uop_2_lrs3 <= r_uop_newuop_2_lrs3; // @[util.scala:73:26] r_uop_2_ldst_val <= r_uop_newuop_2_ldst_val; // @[util.scala:73:26] r_uop_2_dst_rtype <= r_uop_newuop_2_dst_rtype; // @[util.scala:73:26] r_uop_2_lrs1_rtype <= r_uop_newuop_2_lrs1_rtype; // @[util.scala:73:26] r_uop_2_lrs2_rtype <= r_uop_newuop_2_lrs2_rtype; // @[util.scala:73:26] r_uop_2_frs3_en <= r_uop_newuop_2_frs3_en; // @[util.scala:73:26] r_uop_2_fp_val <= r_uop_newuop_2_fp_val; // @[util.scala:73:26] r_uop_2_fp_single <= r_uop_newuop_2_fp_single; // @[util.scala:73:26] r_uop_2_xcpt_pf_if <= r_uop_newuop_2_xcpt_pf_if; // @[util.scala:73:26] r_uop_2_xcpt_ae_if <= r_uop_newuop_2_xcpt_ae_if; // @[util.scala:73:26] r_uop_2_xcpt_ma_if <= r_uop_newuop_2_xcpt_ma_if; // @[util.scala:73:26] r_uop_2_bp_debug_if <= r_uop_newuop_2_bp_debug_if; // @[util.scala:73:26] r_uop_2_bp_xcpt_if <= r_uop_newuop_2_bp_xcpt_if; // @[util.scala:73:26] r_uop_2_debug_fsrc <= r_uop_newuop_2_debug_fsrc; // @[util.scala:73:26] r_uop_2_debug_tsrc <= r_uop_newuop_2_debug_tsrc; // @[util.scala:73:26] always @(posedge) endmodule
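The long run of nonblocking assignments above is the flattened form of a single registered aggregate in Chisel: FIRRTL lowering gives each leaf field of the registered Vec of uop bundles its own r_uop_<i>_<field> register, all updated in the same clocked block (the shared util.scala:73:26 locator on every assignment is one sign that they all come from a single register definition). A minimal sketch of the pattern, assuming a hypothetical cut-down MicroOpLike bundle rather than BOOM's full MicroOp:

import chisel3._

// Hypothetical, cut-down stand-in for BOOM's MicroOp.
class MicroOpLike extends Bundle {
  val uopc    = UInt(7.W)
  val is_br   = Bool()
  val br_mask = UInt(8.W)
}

class PipeUops extends Module {
  val io = IO(new Bundle {
    val in  = Input(Vec(3, new MicroOpLike))
    val out = Output(Vec(3, new MicroOpLike))
  })
  // One RegNext of the whole Vec; the emitted Verilog contains one register and
  // one nonblocking assignment per leaf field (r_uop_0_uopc, r_uop_1_is_br, ...).
  val r_uop = RegNext(io.in)
  io.out := r_uop
}

Registering the aggregate once keeps the Chisel source compact; the per-field fan-out only appears in the emitted Verilog.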
Generate the Verilog code corresponding to the following Chisel files. File Buffer.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.diplomacy.BufferParams class TLBufferNode ( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams)(implicit valName: ValName) extends TLAdapterNode( clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) }, managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) } ) { override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}" override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none) } class TLBuffer( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams)(implicit p: Parameters) extends LazyModule { def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace) def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde) def this()(implicit p: Parameters) = this(BufferParams.default) val node = new TLBufferNode(a, b, c, d, e) lazy val module = new Impl class Impl extends LazyModuleImp(this) { def headBundle = node.out.head._2.bundle override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_") (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out.a <> a(in .a) in .d <> d(out.d) if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) { in .b <> b(out.b) out.c <> c(in .c) out.e <> e(in .e) } else { in.b.valid := false.B in.c.ready := true.B in.e.ready := true.B out.b.ready := true.B out.c.valid := false.B out.e.valid := false.B } } } } object TLBuffer { def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default) def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde) def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace) def apply( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams)(implicit p: Parameters): TLNode = { val buffer = LazyModule(new TLBuffer(a, b, c, d, e)) buffer.node } def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = { val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) } name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } } buffers.map(_.node) } def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = { chain(depth, name) .reduceLeftOption(_ :*=* _) .getOrElse(TLNameNode("no_buffer")) } }
module TLBuffer_20( // @[Buffer.scala:40:9]
  input clock, // @[Buffer.scala:40:9]
  input reset // @[Buffer.scala:40:9]
);
endmodule
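The per-channel buffering in TLBuffer (out.a <> a(in.a), in.d <> d(out.d), ...) reduces to BufferParams.apply from freechips.rocketchip.diplomacy, which wraps a DecoupledIO in a Queue of the requested depth with the flow/pipe options, or passes the channel straight through when the depth is 0. A minimal sketch of that idea on a standalone stream, using a hypothetical BufferedStream module rather than the diplomatic node:

import chisel3._
import chisel3.util.{Decoupled, Queue}

// Hypothetical standalone module illustrating the BufferParams.apply pattern:
// the Decoupled channel is either passed through (depth 0, BufferParams.none)
// or run through a Queue with the requested depth/flow/pipe options.
class BufferedStream(depth: Int, flow: Boolean, pipe: Boolean) extends Module {
  val io = IO(new Bundle {
    val enq = Flipped(Decoupled(UInt(8.W)))
    val deq = Decoupled(UInt(8.W))
  })
  io.deq <> (if (depth > 0) Queue(io.enq, depth, flow = flow, pipe = pipe) else io.enq)
}

BufferParams.default corresponds to depth 2 with flow and pipe both false; when every channel uses BufferParams.none the node reports circuitIdentity and adds no logic at all.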
Generate the Verilog code corresponding to the following Chisel files. File tage.scala: package boom.v3.ifu import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.{Field, Parameters} import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tilelink._ import boom.v3.common._ import boom.v3.util.{BoomCoreStringPrefix, MaskLower, WrapInc} import scala.math.min class TageResp extends Bundle { val ctr = UInt(3.W) val u = UInt(2.W) } class TageTable(val nRows: Int, val tagSz: Int, val histLength: Int, val uBitPeriod: Int) (implicit p: Parameters) extends BoomModule()(p) with HasBoomFrontendParameters { require(histLength <= globalHistoryLength) val nWrBypassEntries = 2 val io = IO( new Bundle { val f1_req_valid = Input(Bool()) val f1_req_pc = Input(UInt(vaddrBitsExtended.W)) val f1_req_ghist = Input(UInt(globalHistoryLength.W)) val f3_resp = Output(Vec(bankWidth, Valid(new TageResp))) val update_mask = Input(Vec(bankWidth, Bool())) val update_taken = Input(Vec(bankWidth, Bool())) val update_alloc = Input(Vec(bankWidth, Bool())) val update_old_ctr = Input(Vec(bankWidth, UInt(3.W))) val update_pc = Input(UInt()) val update_hist = Input(UInt()) val update_u_mask = Input(Vec(bankWidth, Bool())) val update_u = Input(Vec(bankWidth, UInt(2.W))) }) def compute_folded_hist(hist: UInt, l: Int) = { val nChunks = (histLength + l - 1) / l val hist_chunks = (0 until nChunks) map {i => hist(min((i+1)*l, histLength)-1, i*l) } hist_chunks.reduce(_^_) } def compute_tag_and_hash(unhashed_idx: UInt, hist: UInt) = { val idx_history = compute_folded_hist(hist, log2Ceil(nRows)) val idx = (unhashed_idx ^ idx_history)(log2Ceil(nRows)-1,0) val tag_history = compute_folded_hist(hist, tagSz) val tag = ((unhashed_idx >> log2Ceil(nRows)) ^ tag_history)(tagSz-1,0) (idx, tag) } def inc_ctr(ctr: UInt, taken: Bool): UInt = { Mux(!taken, Mux(ctr === 0.U, 0.U, ctr - 1.U), Mux(ctr === 7.U, 7.U, ctr + 1.U)) } val doing_reset = RegInit(true.B) val reset_idx = RegInit(0.U(log2Ceil(nRows).W)) reset_idx := reset_idx + doing_reset when (reset_idx === (nRows-1).U) { doing_reset := false.B } class TageEntry extends Bundle { val valid = Bool() // TODO: Remove this valid bit val tag = UInt(tagSz.W) val ctr = UInt(3.W) } val tageEntrySz = 1 + tagSz + 3 val (s1_hashed_idx, s1_tag) = compute_tag_and_hash(fetchIdx(io.f1_req_pc), io.f1_req_ghist) val hi_us = SyncReadMem(nRows, Vec(bankWidth, Bool())) val lo_us = SyncReadMem(nRows, Vec(bankWidth, Bool())) val table = SyncReadMem(nRows, Vec(bankWidth, UInt(tageEntrySz.W))) val mems = Seq((f"tage_l$histLength", nRows, bankWidth * tageEntrySz)) val s2_tag = RegNext(s1_tag) val s2_req_rtage = VecInit(table.read(s1_hashed_idx, io.f1_req_valid).map(_.asTypeOf(new TageEntry))) val s2_req_rhius = hi_us.read(s1_hashed_idx, io.f1_req_valid) val s2_req_rlous = lo_us.read(s1_hashed_idx, io.f1_req_valid) val s2_req_rhits = VecInit(s2_req_rtage.map(e => e.valid && e.tag === s2_tag && !doing_reset)) for (w <- 0 until bankWidth) { // This bit indicates the TAGE table matched here io.f3_resp(w).valid := RegNext(s2_req_rhits(w)) io.f3_resp(w).bits.u := RegNext(Cat(s2_req_rhius(w), s2_req_rlous(w))) io.f3_resp(w).bits.ctr := RegNext(s2_req_rtage(w).ctr) } val clear_u_ctr = RegInit(0.U((log2Ceil(uBitPeriod) + log2Ceil(nRows) + 1).W)) when (doing_reset) { clear_u_ctr := 1.U } .otherwise { clear_u_ctr := clear_u_ctr + 1.U } val doing_clear_u = clear_u_ctr(log2Ceil(uBitPeriod)-1,0) === 0.U val doing_clear_u_hi = doing_clear_u && clear_u_ctr(log2Ceil(uBitPeriod) + log2Ceil(nRows)) === 1.U 
val doing_clear_u_lo = doing_clear_u && clear_u_ctr(log2Ceil(uBitPeriod) + log2Ceil(nRows)) === 0.U val clear_u_idx = clear_u_ctr >> log2Ceil(uBitPeriod) val (update_idx, update_tag) = compute_tag_and_hash(fetchIdx(io.update_pc), io.update_hist) val update_wdata = Wire(Vec(bankWidth, new TageEntry)) table.write( Mux(doing_reset, reset_idx , update_idx), Mux(doing_reset, VecInit(Seq.fill(bankWidth) { 0.U(tageEntrySz.W) }), VecInit(update_wdata.map(_.asUInt))), Mux(doing_reset, ~(0.U(bankWidth.W)) , io.update_mask.asUInt).asBools ) val update_hi_wdata = Wire(Vec(bankWidth, Bool())) hi_us.write( Mux(doing_reset, reset_idx, Mux(doing_clear_u_hi, clear_u_idx, update_idx)), Mux(doing_reset || doing_clear_u_hi, VecInit((0.U(bankWidth.W)).asBools), update_hi_wdata), Mux(doing_reset || doing_clear_u_hi, ~(0.U(bankWidth.W)), io.update_u_mask.asUInt).asBools ) val update_lo_wdata = Wire(Vec(bankWidth, Bool())) lo_us.write( Mux(doing_reset, reset_idx, Mux(doing_clear_u_lo, clear_u_idx, update_idx)), Mux(doing_reset || doing_clear_u_lo, VecInit((0.U(bankWidth.W)).asBools), update_lo_wdata), Mux(doing_reset || doing_clear_u_lo, ~(0.U(bankWidth.W)), io.update_u_mask.asUInt).asBools ) val wrbypass_tags = Reg(Vec(nWrBypassEntries, UInt(tagSz.W))) val wrbypass_idxs = Reg(Vec(nWrBypassEntries, UInt(log2Ceil(nRows).W))) val wrbypass = Reg(Vec(nWrBypassEntries, Vec(bankWidth, UInt(3.W)))) val wrbypass_enq_idx = RegInit(0.U(log2Ceil(nWrBypassEntries).W)) val wrbypass_hits = VecInit((0 until nWrBypassEntries) map { i => !doing_reset && wrbypass_tags(i) === update_tag && wrbypass_idxs(i) === update_idx }) val wrbypass_hit = wrbypass_hits.reduce(_||_) val wrbypass_hit_idx = PriorityEncoder(wrbypass_hits) for (w <- 0 until bankWidth) { update_wdata(w).ctr := Mux(io.update_alloc(w), Mux(io.update_taken(w), 4.U, 3.U ), Mux(wrbypass_hit, inc_ctr(wrbypass(wrbypass_hit_idx)(w), io.update_taken(w)), inc_ctr(io.update_old_ctr(w), io.update_taken(w)) ) ) update_wdata(w).valid := true.B update_wdata(w).tag := update_tag update_hi_wdata(w) := io.update_u(w)(1) update_lo_wdata(w) := io.update_u(w)(0) } when (io.update_mask.reduce(_||_)) { when (wrbypass_hits.reduce(_||_)) { wrbypass(wrbypass_hit_idx) := VecInit(update_wdata.map(_.ctr)) } .otherwise { wrbypass (wrbypass_enq_idx) := VecInit(update_wdata.map(_.ctr)) wrbypass_tags(wrbypass_enq_idx) := update_tag wrbypass_idxs(wrbypass_enq_idx) := update_idx wrbypass_enq_idx := WrapInc(wrbypass_enq_idx, nWrBypassEntries) } } } case class BoomTageParams( // nSets, histLen, tagSz tableInfo: Seq[Tuple3[Int, Int, Int]] = Seq(( 128, 2, 7), ( 128, 4, 7), ( 256, 8, 8), ( 256, 16, 8), ( 128, 32, 9), ( 128, 64, 9)), uBitPeriod: Int = 2048 ) class TageBranchPredictorBank(params: BoomTageParams = BoomTageParams())(implicit p: Parameters) extends BranchPredictorBank()(p) { val tageUBitPeriod = params.uBitPeriod val tageNTables = params.tableInfo.size class TageMeta extends Bundle { val provider = Vec(bankWidth, Valid(UInt(log2Ceil(tageNTables).W))) val alt_differs = Vec(bankWidth, Output(Bool())) val provider_u = Vec(bankWidth, Output(UInt(2.W))) val provider_ctr = Vec(bankWidth, Output(UInt(3.W))) val allocate = Vec(bankWidth, Valid(UInt(log2Ceil(tageNTables).W))) } val f3_meta = Wire(new TageMeta) override val metaSz = f3_meta.asUInt.getWidth require(metaSz <= bpdMaxMetaLength) def inc_u(u: UInt, alt_differs: Bool, mispredict: Bool): UInt = { Mux(!alt_differs, u, Mux(mispredict, Mux(u === 0.U, 0.U, u - 1.U), Mux(u === 3.U, 3.U, u + 1.U))) } val tt = params.tableInfo map { case (n, l, s) => 
{ val t = Module(new TageTable(n, s, l, params.uBitPeriod)) t.io.f1_req_valid := RegNext(io.f0_valid) t.io.f1_req_pc := RegNext(io.f0_pc) t.io.f1_req_ghist := io.f1_ghist (t, t.mems) } } val tables = tt.map(_._1) val mems = tt.map(_._2).flatten val f3_resps = VecInit(tables.map(_.io.f3_resp)) val s1_update_meta = s1_update.bits.meta.asTypeOf(new TageMeta) val s1_update_mispredict_mask = UIntToOH(s1_update.bits.cfi_idx.bits) & Fill(bankWidth, s1_update.bits.cfi_mispredicted) val s1_update_mask = WireInit((0.U).asTypeOf(Vec(tageNTables, Vec(bankWidth, Bool())))) val s1_update_u_mask = WireInit((0.U).asTypeOf(Vec(tageNTables, Vec(bankWidth, UInt(1.W))))) val s1_update_taken = Wire(Vec(tageNTables, Vec(bankWidth, Bool()))) val s1_update_old_ctr = Wire(Vec(tageNTables, Vec(bankWidth, UInt(3.W)))) val s1_update_alloc = Wire(Vec(tageNTables, Vec(bankWidth, Bool()))) val s1_update_u = Wire(Vec(tageNTables, Vec(bankWidth, UInt(2.W)))) s1_update_taken := DontCare s1_update_old_ctr := DontCare s1_update_alloc := DontCare s1_update_u := DontCare for (w <- 0 until bankWidth) { var altpred = io.resp_in(0).f3(w).taken val final_altpred = WireInit(io.resp_in(0).f3(w).taken) var provided = false.B var provider = 0.U io.resp.f3(w).taken := io.resp_in(0).f3(w).taken for (i <- 0 until tageNTables) { val hit = f3_resps(i)(w).valid val ctr = f3_resps(i)(w).bits.ctr when (hit) { io.resp.f3(w).taken := Mux(ctr === 3.U || ctr === 4.U, altpred, ctr(2)) final_altpred := altpred } provided = provided || hit provider = Mux(hit, i.U, provider) altpred = Mux(hit, f3_resps(i)(w).bits.ctr(2), altpred) } f3_meta.provider(w).valid := provided f3_meta.provider(w).bits := provider f3_meta.alt_differs(w) := final_altpred =/= io.resp.f3(w).taken f3_meta.provider_u(w) := f3_resps(provider)(w).bits.u f3_meta.provider_ctr(w) := f3_resps(provider)(w).bits.ctr // Create a mask of tables which did not hit our query, and also contain useless entries // and also uses a longer history than the provider val allocatable_slots = ( VecInit(f3_resps.map(r => !r(w).valid && r(w).bits.u === 0.U)).asUInt & ~(MaskLower(UIntToOH(provider)) & Fill(tageNTables, provided)) ) val alloc_lfsr = random.LFSR(tageNTables max 2) val first_entry = PriorityEncoder(allocatable_slots) val masked_entry = PriorityEncoder(allocatable_slots & alloc_lfsr) val alloc_entry = Mux(allocatable_slots(masked_entry), masked_entry, first_entry) f3_meta.allocate(w).valid := allocatable_slots =/= 0.U f3_meta.allocate(w).bits := alloc_entry val update_was_taken = (s1_update.bits.cfi_idx.valid && (s1_update.bits.cfi_idx.bits === w.U) && s1_update.bits.cfi_taken) when (s1_update.bits.br_mask(w) && s1_update.valid && s1_update.bits.is_commit_update) { when (s1_update_meta.provider(w).valid) { val provider = s1_update_meta.provider(w).bits s1_update_mask(provider)(w) := true.B s1_update_u_mask(provider)(w) := true.B val new_u = inc_u(s1_update_meta.provider_u(w), s1_update_meta.alt_differs(w), s1_update_mispredict_mask(w)) s1_update_u (provider)(w) := new_u s1_update_taken (provider)(w) := update_was_taken s1_update_old_ctr(provider)(w) := s1_update_meta.provider_ctr(w) s1_update_alloc (provider)(w) := false.B } } } when (s1_update.valid && s1_update.bits.is_commit_update && s1_update.bits.cfi_mispredicted && s1_update.bits.cfi_idx.valid) { val idx = s1_update.bits.cfi_idx.bits val allocate = s1_update_meta.allocate(idx) when (allocate.valid) { s1_update_mask (allocate.bits)(idx) := true.B s1_update_taken(allocate.bits)(idx) := s1_update.bits.cfi_taken 
s1_update_alloc(allocate.bits)(idx) := true.B s1_update_u_mask(allocate.bits)(idx) := true.B s1_update_u (allocate.bits)(idx) := 0.U } .otherwise { val provider = s1_update_meta.provider(idx) val decr_mask = Mux(provider.valid, ~MaskLower(UIntToOH(provider.bits)), 0.U) for (i <- 0 until tageNTables) { when (decr_mask(i)) { s1_update_u_mask(i)(idx) := true.B s1_update_u (i)(idx) := 0.U } } } } for (i <- 0 until tageNTables) { for (w <- 0 until bankWidth) { tables(i).io.update_mask(w) := RegNext(s1_update_mask(i)(w)) tables(i).io.update_taken(w) := RegNext(s1_update_taken(i)(w)) tables(i).io.update_alloc(w) := RegNext(s1_update_alloc(i)(w)) tables(i).io.update_old_ctr(w) := RegNext(s1_update_old_ctr(i)(w)) tables(i).io.update_u_mask(w) := RegNext(s1_update_u_mask(i)(w)) tables(i).io.update_u(w) := RegNext(s1_update_u(i)(w)) } tables(i).io.update_pc := RegNext(s1_update.bits.pc) tables(i).io.update_hist := RegNext(s1_update.bits.ghist) } //io.f3_meta := Cat(f3_meta.asUInt, micro.io.f3_meta(micro.metaSz-1,0), base.io.f3_meta(base.metaSz-1, 0)) io.f3_meta := f3_meta.asUInt }
module hi_us_7( // @[tage.scala:89:27]
  input  [7:0] R0_addr,
  input        R0_en,
  input        R0_clk,
  output [3:0] R0_data,
  input  [7:0] W0_addr,
  input        W0_clk,
  input  [3:0] W0_data,
  input  [3:0] W0_mask
);
  hi_us_0_ext hi_us_0_ext ( // @[tage.scala:89:27]
    .R0_addr (R0_addr),
    .R0_en   (R0_en),
    .R0_clk  (R0_clk),
    .R0_data (R0_data),
    .W0_addr (W0_addr),
    .W0_en   (1'h1), // @[tage.scala:89:27]
    .W0_clk  (W0_clk),
    .W0_data (W0_data),
    .W0_mask (W0_mask)
  ); // @[tage.scala:89:27]
endmodule
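hi_us in TageTable is a SyncReadMem of Vec(bankWidth, Bool()); memory lowering replaces it with an external macro (hi_us_0_ext here) whose W0_mask carries one bit per Vec lane, so io.update_u_mask can update only the selected banks of a row. A minimal sketch of the same masked-write pattern, using a hypothetical UsefulBits module:

import chisel3._
import chisel3.util.log2Ceil

// Hypothetical module showing the SyncReadMem-of-Vec[Bool] pattern that
// hi_us/lo_us use inside TageTable: a synchronous read plus a write whose
// per-lane mask becomes W0_mask on the generated memory macro.
class UsefulBits(nRows: Int, bankWidth: Int) extends Module {
  val io = IO(new Bundle {
    val raddr = Input(UInt(log2Ceil(nRows).W))
    val ren   = Input(Bool())
    val rdata = Output(Vec(bankWidth, Bool()))
    val waddr = Input(UInt(log2Ceil(nRows).W))
    val wdata = Input(Vec(bankWidth, Bool()))
    val wmask = Input(Vec(bankWidth, Bool()))
  })
  val mem = SyncReadMem(nRows, Vec(bankWidth, Bool()))
  io.rdata := mem.read(io.raddr, io.ren)
  mem.write(io.waddr, io.wdata, io.wmask)
}

The macro's W0_en is tied to 1'h1 because the write in tage.scala is issued unconditionally; gating happens entirely through the mask bits.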
Generate the Verilog code corresponding to the following Chisel files. File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def 
validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File package.scala: // See LICENSE.SiFive for license details. 
package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. 
*/ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class 
OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. 
*/ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. 
val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. */ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File Parameters.scala: // See LICENSE.SiFive for license details. 
package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // An potentially empty inclusive range of 2-powers [min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max, s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { 
log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask are the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f decribes a device managing 0x1000-0x100f, 0x1100-0x110f, ... case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def 
toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if 
(isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File MixedNode.scala: package org.chipsalliance.diplomacy.nodes import chisel3.{Data, DontCare, Wire} import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.{Field, Parameters} import org.chipsalliance.diplomacy.ValName import org.chipsalliance.diplomacy.sourceLine /** One side metadata of a [[Dangle]]. * * Describes one side of an edge going into or out of a [[BaseNode]]. * * @param serial * the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to. * @param index * the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to. */ case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] { import scala.math.Ordered.orderingToOrdered def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that)) } /** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]] * connects. * * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] , * [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]]. * * @param source * the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within * that [[BaseNode]]. * @param sink * sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that * [[BaseNode]]. * @param flipped * flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to * `danglesIn`. * @param dataOpt * actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module */ case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) { def data = dataOpt.get } /** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often * derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually * implement the protocol. */ case class Edges[EI, EO](in: Seq[EI], out: Seq[EO]) /** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */ case object MonitorsEnabled extends Field[Boolean](true) /** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented. * * For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but * [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink * nodes, flipping the rendering of one node's edge will usual produce a more concise visual layout for the * [[LazyModule]]. */ case object RenderFlipped extends Field[Boolean](false) /** The sealed node class in the package, all node are derived from it. * * @param inner * Sink interface implementation. * @param outer * Source interface implementation. * @param valName * val name of this node. * @tparam DI * Downward-flowing parameters received on the inner side of the node. It is usually a brunch of parameters * describing the protocol parameters from a source. 
For an [[InwardNode]], it is determined by the connected * [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port * parameters. * @tparam UI * Upward-flowing parameters generated by the inner side of the node. It is usually a brunch of parameters describing * the protocol parameters of a sink. For an [[InwardNode]], it is determined itself. * @tparam EI * Edge Parameters describing a connection on the inner side of the node. It is usually a brunch of transfers * specified for a sink according to protocol. * @tparam BI * Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface. * It should extends from [[chisel3.Data]], which represents the real hardware. * @tparam DO * Downward-flowing parameters generated on the outer side of the node. It is usually a brunch of parameters * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined itself. * @tparam UO * Upward-flowing parameters received by the outer side of the node. It is usually a brunch of parameters describing * the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]]. * Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters. * @tparam EO * Edge Parameters describing a connection on the outer side of the node. It is usually a brunch of transfers * specified for a source according to protocol. * @tparam BO * Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source * interface. It should extends from [[chisel3.Data]], which represents the real hardware. * * @note * Call Graph of [[MixedNode]] * - line `─`: source is process by a function and generate pass to others * - Arrow `→`: target of arrow is generated by source * * {{{ * (from the other node) * ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐ * ↓ │ * (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │ * [[InwardNode.accPI]] │ │ │ * │ │ (based on protocol) │ * │ │ [[MixedNode.inner.edgeI]] │ * │ │ ↓ │ * ↓ │ │ │ * (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │ * [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │ * │ │ ↑ │ │ │ * │ │ │ [[OutwardNode.doParams]] │ │ * │ │ │ (from the other node) │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * │ │ │ └────────┬──────────────┤ │ * │ │ │ │ │ │ * │ │ │ │ (based on protocol) │ * │ │ │ │ [[MixedNode.inner.edgeI]] │ * │ │ │ │ │ │ * │ │ (from the other node) │ ↓ │ * │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │ * │ ↑ ↑ │ │ ↓ │ * │ │ │ │ │ [[MixedNode.in]] │ * │ │ │ │ ↓ ↑ │ * │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │ * ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │ * │ │ │ [[MixedNode.bundleOut]]─┐ │ │ * │ │ │ ↑ ↓ │ │ * │ │ │ │ [[MixedNode.out]] │ │ * │ ↓ ↓ │ ↑ │ │ * │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │ * │ │ (from the other node) ↑ │ │ * │ │ │ │ │ │ * │ │ │ [[MixedNode.outer.edgeO]] │ │ * │ │ │ (based on protocol) │ │ * │ │ │ │ │ │ * │ │ │ ┌────────────────────────────────────────┤ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * (immobilize after elaboration)│ ↓ │ │ │ │ * 
[[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │ * ↑ (inward port from [[OutwardNode]]) │ │ │ │ * │ ┌─────────────────────────────────────────┤ │ │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * [[OutwardNode.accPO]] │ ↓ │ │ │ * (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │ * │ ↑ │ │ * │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │ * └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘ * }}} */ abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data]( val inner: InwardNodeImp[DI, UI, EI, BI], val outer: OutwardNodeImp[DO, UO, EO, BO] )( implicit valName: ValName) extends BaseNode with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO] with InwardNode[DI, UI, BI] with OutwardNode[DO, UO, BO] { // Generate a [[NodeHandle]] with inward and outward node are both this node. val inward = this val outward = this /** Debug info of nodes binding. */ def bindingInfo: String = s"""$iBindingInfo |$oBindingInfo |""".stripMargin /** Debug info of ports connecting. */ def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}] |${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}] |""".stripMargin /** Debug info of parameters propagations. */ def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}] |${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}] |${diParams.size} downstream inward parameters: [${diParams.mkString(",")}] |${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}] |""".stripMargin /** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and * [[MixedNode.iPortMapping]]. * * Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward * stars and outward stars. * * This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type * of node. * * @param iKnown * Number of known-size ([[BIND_ONCE]]) input bindings. * @param oKnown * Number of known-size ([[BIND_ONCE]]) output bindings. * @param iStar * Number of unknown size ([[BIND_STAR]]) input bindings. * @param oStar * Number of unknown size ([[BIND_STAR]]) output bindings. * @return * A Tuple of the resolved number of input and output connections. */ protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) /** Function to generate downward-flowing outward params from the downward-flowing input params and the current output * ports. * * @param n * The size of the output sequence to generate. * @param p * Sequence of downward-flowing input parameters of this node. * @return * A `n`-sized sequence of downward-flowing output edge parameters. */ protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]]. * * @param n * Size of the output sequence. * @param p * Upward-flowing output edge parameters. * @return * A n-sized sequence of upward-flowing input edge parameters. 
*/ protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] /** @return * The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with * [[BIND_STAR]]. */ protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR) /** @return * The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of * output bindings bound with [[BIND_STAR]]. */ protected[diplomacy] lazy val sourceCard: Int = iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR) /** @return list of nodes involved in flex bindings with this node. */ protected[diplomacy] lazy val flexes: Seq[BaseNode] = oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2) /** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin * greedily taking up the remaining connections. * * @return * A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return * value is not relevant. */ protected[diplomacy] lazy val flexOffset: Int = { /** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex * operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a * connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of * each node in the current set and decide whether they should be added to the set or not. * * @return * the mapping of [[BaseNode]] indexed by their serial numbers. */ def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = { if (visited.contains(v.serial) || !v.flexibleArityDirection) { visited } else { v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum)) } } /** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node. * * @example * {{{ * a :*=* b :*=* c * d :*=* b * e :*=* f * }}} * * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)` */ val flexSet = DFS(this, Map()).values /** The total number of :*= operators where we're on the left. */ val allSink = flexSet.map(_.sinkCard).sum /** The total number of :=* operators used when we're on the right. */ val allSource = flexSet.map(_.sourceCard).sum require( allSink == 0 || allSource == 0, s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction." ) allSink - allSource } /** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */ protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = { if (flexibleArityDirection) flexOffset else if (n.flexibleArityDirection) n.flexOffset else 0 } /** For a node which is connected between two nodes, select the one that will influence the direction of the flex * resolution. */ protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = { val dir = edgeArityDirection(n) if (dir < 0) l else if (dir > 0) r else 1 } /** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */ private var starCycleGuard = false /** Resolve all the star operators into concrete indicies. 
As connections are being made, some may be "star" * connections which need to be resolved. In some way to determine how many actual edges they correspond to. We also * need to build up the ranges of edges which correspond to each binding operator, so that We can apply the correct * edge parameters and later build up correct bundle connections. * * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding * operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort * (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= * bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N` */ protected[diplomacy] lazy val ( oPortMapping: Seq[(Int, Int)], iPortMapping: Seq[(Int, Int)], oStar: Int, iStar: Int ) = { try { if (starCycleGuard) throw StarCycleException() starCycleGuard = true // For a given node N... // Number of foo :=* N // + Number of bar :=* foo :*=* N val oStars = oBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0) } // Number of N :*= foo // + Number of N :*=* foo :*= bar val iStars = iBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0) } // 1 for foo := N // + bar.iStar for bar :*= foo :*=* N // + foo.iStar for foo :*= N // + 0 for foo :=* N val oKnown = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, 0, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => 0 } }.sum // 1 for N := foo // + bar.oStar for N :*=* foo :=* bar // + foo.oStar for N :=* foo // + 0 for N :*= foo val iKnown = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, 0) case BIND_QUERY => n.oStar case BIND_STAR => 0 } }.sum // Resolve star depends on the node subclass to implement the algorithm for this. val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars) // Cumulative list of resolved outward binding range starting points val oSum = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => oStar } }.scanLeft(0)(_ + _) // Cumulative list of resolved inward binding range starting points val iSum = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar) case BIND_QUERY => n.oStar case BIND_STAR => iStar } }.scanLeft(0)(_ + _) // Create ranges for each binding based on the running sums and return // those along with resolved values for the star operations. (oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar) } catch { case c: StarCycleException => throw c.copy(loop = context +: c.loop) } } /** Sequence of inward ports. * * This should be called after all star bindings are resolved. * * Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. * `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this * connection was made in the source code. 
*/ protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oBindings.flatMap { case (i, n, _, p, s) => // for each binding operator in this node, look at what it connects to val (start, end) = n.iPortMapping(i) (start until end).map { j => (j, n, p, s) } } /** Sequence of outward ports. * * This should be called after all star bindings are resolved. * * `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of * outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection * was made in the source code. */ protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iBindings.flatMap { case (i, n, _, p, s) => // query this port index range of this node in the other side of node. val (start, end) = n.oPortMapping(i) (start until end).map { j => (j, n, p, s) } } // Ephemeral nodes ( which have non-None iForward/oForward) have in_degree = out_degree // Thus, there must exist an Eulerian path and the below algorithms terminate @scala.annotation.tailrec private def oTrace( tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) ): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.iForward(i) match { case None => (i, n, p, s) case Some((j, m)) => oTrace((j, m, p, s)) } } @scala.annotation.tailrec private def iTrace( tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) ): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.oForward(i) match { case None => (i, n, p, s) case Some((j, m)) => iTrace((j, m, p, s)) } } /** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - Numeric index of this binding in the [[InwardNode]] on the other end. * - [[InwardNode]] on the other end of this binding. * - A view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace) /** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - numeric index of this binding in [[OutwardNode]] on the other end. * - [[OutwardNode]] on the other end of this binding. * - a view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace) private var oParamsCycleGuard = false protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) } protected[diplomacy] lazy val doParams: Seq[DO] = { try { if (oParamsCycleGuard) throw DownwardCycleException() oParamsCycleGuard = true val o = mapParamsD(oPorts.size, diParams) require( o.size == oPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of outward ports should equal the number of produced outward parameters. 
|$context |$connectedPortsInfo |Downstreamed inward parameters: [${diParams.mkString(",")}] |Produced outward parameters: [${o.mkString(",")}] |""".stripMargin ) o.map(outer.mixO(_, this)) } catch { case c: DownwardCycleException => throw c.copy(loop = context +: c.loop) } } private var iParamsCycleGuard = false protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) } protected[diplomacy] lazy val uiParams: Seq[UI] = { try { if (iParamsCycleGuard) throw UpwardCycleException() iParamsCycleGuard = true val i = mapParamsU(iPorts.size, uoParams) require( i.size == iPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of inward ports should equal the number of produced inward parameters. |$context |$connectedPortsInfo |Upstreamed outward parameters: [${uoParams.mkString(",")}] |Produced inward parameters: [${i.mkString(",")}] |""".stripMargin ) i.map(inner.mixI(_, this)) } catch { case c: UpwardCycleException => throw c.copy(loop = context +: c.loop) } } /** Outward edge parameters. */ protected[diplomacy] lazy val edgesOut: Seq[EO] = (oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) } /** Inward edge parameters. */ protected[diplomacy] lazy val edgesIn: Seq[EI] = (iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) } /** A tuple of the input edge parameters and output edge parameters for the edges bound to this node. * * If you need to access to the edges of a foreign Node, use this method (in/out create bundles). */ lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut) /** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */ protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e => val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } /** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */ protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e => val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(serial, i), sink = HalfEdge(n.serial, j), flipped = false, name = wirePrefix + "out", dataOpt = None ) } private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(n.serial, j), sink = HalfEdge(serial, i), flipped = true, name = wirePrefix + "in", dataOpt = None ) } /** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */ protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleOut(i))) } /** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. 
*/ protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleIn(i))) } private[diplomacy] var instantiated = false /** Gather Bundle and edge parameters of outward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def out: Seq[(BO, EO)] = { require( instantiated, s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleOut.zip(edgesOut) } /** Gather Bundle and edge parameters of inward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def in: Seq[(BI, EI)] = { require( instantiated, s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleIn.zip(edgesIn) } /** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires, * instantiate monitors on all input ports if appropriate, and return all the dangles of this node. */ protected[diplomacy] def instantiate(): Seq[Dangle] = { instantiated = true if (!circuitIdentity) { (iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) } } danglesOut ++ danglesIn } protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn /** Connects the outward part of a node with the inward part of this node. */ protected[diplomacy] def bind( h: OutwardNode[DI, UI, BI], binding: NodeBinding )( implicit p: Parameters, sourceInfo: SourceInfo ): Unit = { val x = this // x := y val y = h sourceLine(sourceInfo, " at ", "") val i = x.iPushed val o = y.oPushed y.oPush( i, x, binding match { case BIND_ONCE => BIND_ONCE case BIND_FLEX => BIND_FLEX case BIND_STAR => BIND_QUERY case BIND_QUERY => BIND_STAR } ) x.iPush(o, y, binding) } /* Metadata for printing the node graph. */ def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) => val re = inner.render(e) (n, re.copy(flipped = re.flipped != p(RenderFlipped))) } /** Metadata for printing the node graph */ def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) } } File UnsafeAXI4ToTL.scala: package ara import chisel3._ import chisel3.util._ import freechips.rocketchip.amba._ import freechips.rocketchip.amba.axi4._ import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.util._ class ReorderData(val dataWidth: Int, val respWidth: Int, val userFields: Seq[BundleFieldBase]) extends Bundle { val data = UInt(dataWidth.W) val resp = UInt(respWidth.W) val last = Bool() val user = BundleMap(userFields) } /** Parameters for [[BaseReservableListBuffer]] and all child classes. 
* * @param numEntries Total number of elements that can be stored in the 'data' RAM * @param numLists Maximum number of linked lists * @param numBeats Maximum number of beats per entry */ case class ReservableListBufferParameters(numEntries: Int, numLists: Int, numBeats: Int) { // Avoid zero-width wires when we call 'log2Ceil' val entryBits = if (numEntries == 1) 1 else log2Ceil(numEntries) val listBits = if (numLists == 1) 1 else log2Ceil(numLists) val beatBits = if (numBeats == 1) 1 else log2Ceil(numBeats) } case class UnsafeAXI4ToTLNode(numTlTxns: Int, wcorrupt: Boolean)(implicit valName: ValName) extends MixedAdapterNode(AXI4Imp, TLImp)( dFn = { case mp => TLMasterPortParameters.v2( masters = mp.masters.zipWithIndex.map { case (m, i) => // Support 'numTlTxns' read requests and 'numTlTxns' write requests at once. val numSourceIds = numTlTxns * 2 TLMasterParameters.v2( name = m.name, sourceId = IdRange(i * numSourceIds, (i + 1) * numSourceIds), nodePath = m.nodePath ) }, echoFields = mp.echoFields, requestFields = AMBAProtField() +: mp.requestFields, responseKeys = mp.responseKeys ) }, uFn = { mp => AXI4SlavePortParameters( slaves = mp.managers.map { m => val maxXfer = TransferSizes(1, mp.beatBytes * (1 << AXI4Parameters.lenBits)) AXI4SlaveParameters( address = m.address, resources = m.resources, regionType = m.regionType, executable = m.executable, nodePath = m.nodePath, supportsWrite = m.supportsPutPartial.intersect(maxXfer), supportsRead = m.supportsGet.intersect(maxXfer), interleavedId = Some(0) // TL2 never interleaves D beats ) }, beatBytes = mp.beatBytes, minLatency = mp.minLatency, responseFields = mp.responseFields, requestKeys = (if (wcorrupt) Seq(AMBACorrupt) else Seq()) ++ mp.requestKeys.filter(_ != AMBAProt) ) } ) class UnsafeAXI4ToTL(numTlTxns: Int, wcorrupt: Boolean)(implicit p: Parameters) extends LazyModule { require(numTlTxns >= 1) require(isPow2(numTlTxns), s"Number of TileLink transactions ($numTlTxns) must be a power of 2") val node = UnsafeAXI4ToTLNode(numTlTxns, wcorrupt) lazy val module = new LazyModuleImp(this) { (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => edgeIn.master.masters.foreach { m => require(m.aligned, "AXI4ToTL requires aligned requests") } val numIds = edgeIn.master.endId val beatBytes = edgeOut.slave.beatBytes val maxTransfer = edgeOut.slave.maxTransfer val maxBeats = maxTransfer / beatBytes // Look for an Error device to redirect bad requests val errorDevs = edgeOut.slave.managers.filter(_.nodePath.last.lazyModule.className == "TLError") require(!errorDevs.isEmpty, "There is no TLError reachable from AXI4ToTL. One must be instantiated.") val errorDev = errorDevs.maxBy(_.maxTransfer) val errorDevAddr = errorDev.address.head.base require( errorDev.supportsPutPartial.contains(maxTransfer), s"Error device supports ${errorDev.supportsPutPartial} PutPartial but must support $maxTransfer" ) require( errorDev.supportsGet.contains(maxTransfer), s"Error device supports ${errorDev.supportsGet} Get but must support $maxTransfer" ) // All of the read-response reordering logic. 
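// Illustrative sketch of the source-ID encoding used below (assumes numTlTxns = 4;
// the concrete values are examples, not part of the original logic):
//   read  request, reserved list-buffer slot 2 -> source = Cat(0.U, 2.U) = 0b010 = 2
//   write request, free write-ID slot 3        -> source = Cat(1.U, 3.U) = 0b111 = 7
// The MSB therefore tags a transaction as read (0) or write (1), and dropping it
// recovers the slot index, which is what 'strippedResponseSourceId' does for
// D-channel responses further below.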
val listBufData = new ReorderData(beatBytes * 8, edgeIn.bundle.respBits, out.d.bits.user.fields) val listBufParams = ReservableListBufferParameters(numTlTxns, numIds, maxBeats) val listBuffer = if (numTlTxns > 1) { Module(new ReservableListBuffer(listBufData, listBufParams)) } else { Module(new PassthroughListBuffer(listBufData, listBufParams)) } // To differentiate between read and write transaction IDs, we will set the MSB of the TileLink 'source' field to // 0 for read requests and 1 for write requests. val isReadSourceBit = 0.U(1.W) val isWriteSourceBit = 1.U(1.W) /* Read request logic */ val rOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle))) val rBytes1 = in.ar.bits.bytes1() val rSize = OH1ToUInt(rBytes1) val rOk = edgeOut.slave.supportsGetSafe(in.ar.bits.addr, rSize) val rId = if (numTlTxns > 1) { Cat(isReadSourceBit, listBuffer.ioReservedIndex) } else { isReadSourceBit } val rAddr = Mux(rOk, in.ar.bits.addr, errorDevAddr.U | in.ar.bits.addr(log2Ceil(beatBytes) - 1, 0)) // Indicates if there are still valid TileLink source IDs left to use. val canIssueR = listBuffer.ioReserve.ready listBuffer.ioReserve.bits := in.ar.bits.id listBuffer.ioReserve.valid := in.ar.valid && rOut.ready in.ar.ready := rOut.ready && canIssueR rOut.valid := in.ar.valid && canIssueR rOut.bits :<= edgeOut.Get(rId, rAddr, rSize)._2 rOut.bits.user :<= in.ar.bits.user rOut.bits.user.lift(AMBAProt).foreach { rProt => rProt.privileged := in.ar.bits.prot(0) rProt.secure := !in.ar.bits.prot(1) rProt.fetch := in.ar.bits.prot(2) rProt.bufferable := in.ar.bits.cache(0) rProt.modifiable := in.ar.bits.cache(1) rProt.readalloc := in.ar.bits.cache(2) rProt.writealloc := in.ar.bits.cache(3) } /* Write request logic */ // Strip off the MSB, which identifies the transaction as read vs write. val strippedResponseSourceId = if (numTlTxns > 1) { out.d.bits.source((out.d.bits.source).getWidth - 2, 0) } else { // When there's only 1 TileLink transaction allowed for read/write, then this field is always 0. 0.U(1.W) } // Track when a write request burst is in progress. val writeBurstBusy = RegInit(false.B) when(in.w.fire) { writeBurstBusy := !in.w.bits.last } val usedWriteIds = RegInit(0.U(numTlTxns.W)) val canIssueW = !usedWriteIds.andR val usedWriteIdsSet = WireDefault(0.U(numTlTxns.W)) val usedWriteIdsClr = WireDefault(0.U(numTlTxns.W)) usedWriteIds := (usedWriteIds & ~usedWriteIdsClr) | usedWriteIdsSet // Since write responses can show up in the middle of a write burst, we need to ensure the write burst ID doesn't // change mid-burst. val freeWriteIdOHRaw = Wire(UInt(numTlTxns.W)) val freeWriteIdOH = freeWriteIdOHRaw holdUnless !writeBurstBusy val freeWriteIdIndex = OHToUInt(freeWriteIdOH) freeWriteIdOHRaw := ~(leftOR(~usedWriteIds) << 1) & ~usedWriteIds val wOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle))) val wBytes1 = in.aw.bits.bytes1() val wSize = OH1ToUInt(wBytes1) val wOk = edgeOut.slave.supportsPutPartialSafe(in.aw.bits.addr, wSize) val wId = if (numTlTxns > 1) { Cat(isWriteSourceBit, freeWriteIdIndex) } else { isWriteSourceBit } val wAddr = Mux(wOk, in.aw.bits.addr, errorDevAddr.U | in.aw.bits.addr(log2Ceil(beatBytes) - 1, 0)) // Here, we're taking advantage of the Irrevocable behavior of AXI4 (once 'valid' is asserted it must remain // asserted until the handshake occurs). We will only accept W-channel beats when we have a valid AW beat, but // the AW-channel beat won't fire until the final W-channel beat fires. So, we have stable address/size/strb // bits during a W-channel burst. 
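// Illustrative timeline for a 2-beat write burst (assumes wOut.ready and canIssueW
// stay high; example only, not part of the original code):
//   cycle 0: aw.valid=1, w.valid=1, w.last=0 -> wOut fires, w.ready=1, aw.ready=0
//   cycle 1: aw.valid=1, w.valid=1, w.last=1 -> wOut fires, w.ready=1, aw.ready=1
// The AW beat retires only together with the final W beat, so 'in.aw.bits' stays
// stable for every TileLink Put beat generated from this burst.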
in.aw.ready := wOut.ready && in.w.valid && in.w.bits.last && canIssueW in.w.ready := wOut.ready && in.aw.valid && canIssueW wOut.valid := in.aw.valid && in.w.valid && canIssueW wOut.bits :<= edgeOut.Put(wId, wAddr, wSize, in.w.bits.data, in.w.bits.strb)._2 in.w.bits.user.lift(AMBACorrupt).foreach { wOut.bits.corrupt := _ } wOut.bits.user :<= in.aw.bits.user wOut.bits.user.lift(AMBAProt).foreach { wProt => wProt.privileged := in.aw.bits.prot(0) wProt.secure := !in.aw.bits.prot(1) wProt.fetch := in.aw.bits.prot(2) wProt.bufferable := in.aw.bits.cache(0) wProt.modifiable := in.aw.bits.cache(1) wProt.readalloc := in.aw.bits.cache(2) wProt.writealloc := in.aw.bits.cache(3) } // Merge the AXI4 read/write requests into the TL-A channel. TLArbiter(TLArbiter.roundRobin)(out.a, (0.U, rOut), (in.aw.bits.len, wOut)) /* Read/write response logic */ val okB = Wire(Irrevocable(new AXI4BundleB(edgeIn.bundle))) val okR = Wire(Irrevocable(new AXI4BundleR(edgeIn.bundle))) val dResp = Mux(out.d.bits.denied || out.d.bits.corrupt, AXI4Parameters.RESP_SLVERR, AXI4Parameters.RESP_OKAY) val dHasData = edgeOut.hasData(out.d.bits) val (_dFirst, dLast, _dDone, dCount) = edgeOut.count(out.d) val dNumBeats1 = edgeOut.numBeats1(out.d.bits) // Handle cases where writeack arrives before write is done val writeEarlyAck = (UIntToOH(strippedResponseSourceId) & usedWriteIds) === 0.U out.d.ready := Mux(dHasData, listBuffer.ioResponse.ready, okB.ready && !writeEarlyAck) listBuffer.ioDataOut.ready := okR.ready okR.valid := listBuffer.ioDataOut.valid okB.valid := out.d.valid && !dHasData && !writeEarlyAck listBuffer.ioResponse.valid := out.d.valid && dHasData listBuffer.ioResponse.bits.index := strippedResponseSourceId listBuffer.ioResponse.bits.data.data := out.d.bits.data listBuffer.ioResponse.bits.data.resp := dResp listBuffer.ioResponse.bits.data.last := dLast listBuffer.ioResponse.bits.data.user :<= out.d.bits.user listBuffer.ioResponse.bits.count := dCount listBuffer.ioResponse.bits.numBeats1 := dNumBeats1 okR.bits.id := listBuffer.ioDataOut.bits.listIndex okR.bits.data := listBuffer.ioDataOut.bits.payload.data okR.bits.resp := listBuffer.ioDataOut.bits.payload.resp okR.bits.last := listBuffer.ioDataOut.bits.payload.last okR.bits.user :<= listBuffer.ioDataOut.bits.payload.user // Upon the final beat in a write request, record a mapping from TileLink source ID to AXI write ID. Upon a write // response, mark the write transaction as complete. val writeIdMap = Mem(numTlTxns, UInt(log2Ceil(numIds).W)) val writeResponseId = writeIdMap.read(strippedResponseSourceId) when(wOut.fire) { writeIdMap.write(freeWriteIdIndex, in.aw.bits.id) } when(edgeOut.done(wOut)) { usedWriteIdsSet := freeWriteIdOH } when(okB.fire) { usedWriteIdsClr := UIntToOH(strippedResponseSourceId, numTlTxns) } okB.bits.id := writeResponseId okB.bits.resp := dResp okB.bits.user :<= out.d.bits.user // AXI4 needs irrevocable behaviour in.r <> Queue.irrevocable(okR, 1, flow = true) in.b <> Queue.irrevocable(okB, 1, flow = true) // Unused channels out.b.ready := true.B out.c.valid := false.B out.e.valid := false.B /* Alignment constraints. The AXI4Fragmenter should guarantee all of these constraints. */ def checkRequest[T <: AXI4BundleA](a: IrrevocableIO[T], reqType: String): Unit = { val lReqType = reqType.toLowerCase when(a.valid) { assert(a.bits.len < maxBeats.U, s"$reqType burst length (%d) must be less than $maxBeats", a.bits.len + 1.U) // Narrow transfers and FIXED bursts must be single-beat bursts. 
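// Worked example of the two checks below (assumes beatBytes = 8, so
// log2Ceil(beatBytes) = 3; the numbers are illustrative only):
//   INCR burst, size=3 (8-byte beats), len=3 (4 beats) -> both checks pass
//   INCR burst, size=2 (4-byte beats), len=3           -> narrow multi-beat, first assert fires
//   FIXED burst, len=1 (2 beats)                       -> fixed multi-beat, second assert fires
// An upstream AXI4Fragmenter is expected to have already split such requests.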
when(a.bits.len =/= 0.U) { assert( a.bits.size === log2Ceil(beatBytes).U, s"Narrow $lReqType transfers (%d < $beatBytes bytes) can't be multi-beat bursts (%d beats)", 1.U << a.bits.size, a.bits.len + 1.U ) assert( a.bits.burst =/= AXI4Parameters.BURST_FIXED, s"Fixed $lReqType bursts can't be multi-beat bursts (%d beats)", a.bits.len + 1.U ) } // Furthermore, the transfer size (a.bits.bytes1() + 1.U) must be naturally-aligned to the address (in // particular, during both WRAP and INCR bursts), but this constraint is already checked by TileLink // Monitors. Note that this alignment requirement means that WRAP bursts are identical to INCR bursts. } } checkRequest(in.ar, "Read") checkRequest(in.aw, "Write") } } } object UnsafeAXI4ToTL { def apply(numTlTxns: Int = 1, wcorrupt: Boolean = true)(implicit p: Parameters) = { val axi42tl = LazyModule(new UnsafeAXI4ToTL(numTlTxns, wcorrupt)) axi42tl.node } } /* ReservableListBuffer logic, and associated classes. */ class ResponsePayload[T <: Data](val data: T, val params: ReservableListBufferParameters) extends Bundle { val index = UInt(params.entryBits.W) val count = UInt(params.beatBits.W) val numBeats1 = UInt(params.beatBits.W) } class DataOutPayload[T <: Data](val payload: T, val params: ReservableListBufferParameters) extends Bundle { val listIndex = UInt(params.listBits.W) } /** Abstract base class to unify [[ReservableListBuffer]] and [[PassthroughListBuffer]]. */ abstract class BaseReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends Module { require(params.numEntries > 0) require(params.numLists > 0) val ioReserve = IO(Flipped(Decoupled(UInt(params.listBits.W)))) val ioReservedIndex = IO(Output(UInt(params.entryBits.W))) val ioResponse = IO(Flipped(Decoupled(new ResponsePayload(gen, params)))) val ioDataOut = IO(Decoupled(new DataOutPayload(gen, params))) } /** A modified version of 'ListBuffer' from 'sifive/block-inclusivecache-sifive'. This module forces users to reserve * linked list entries (through the 'ioReserve' port) before writing data into those linked lists (through the * 'ioResponse' port). Each response is tagged to indicate which linked list it is written into. The responses for a * given linked list can come back out-of-order, but they will be read out through the 'ioDataOut' port in-order. 
* * ==Constructor== * @param gen Chisel type of linked list data element * @param params Other parameters * * ==Module IO== * @param ioReserve Index of list to reserve a new element in * @param ioReservedIndex Index of the entry that was reserved in the linked list, valid when 'ioReserve.fire' * @param ioResponse Payload containing response data and linked-list-entry index * @param ioDataOut Payload containing data read from response linked list and linked list index */ class ReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends BaseReservableListBuffer(gen, params) { val valid = RegInit(0.U(params.numLists.W)) val head = Mem(params.numLists, UInt(params.entryBits.W)) val tail = Mem(params.numLists, UInt(params.entryBits.W)) val used = RegInit(0.U(params.numEntries.W)) val next = Mem(params.numEntries, UInt(params.entryBits.W)) val map = Mem(params.numEntries, UInt(params.listBits.W)) val dataMems = Seq.fill(params.numBeats) { SyncReadMem(params.numEntries, gen) } val dataIsPresent = RegInit(0.U(params.numEntries.W)) val beats = Mem(params.numEntries, UInt(params.beatBits.W)) // The 'data' SRAM should be single-ported (read-or-write), since dual-ported SRAMs are significantly slower. val dataMemReadEnable = WireDefault(false.B) val dataMemWriteEnable = WireDefault(false.B) assert(!(dataMemReadEnable && dataMemWriteEnable)) // 'freeOH' has a single bit set, which is the least-significant bit that is cleared in 'used'. So, it's the // lowest-index entry in the 'data' RAM which is free. val freeOH = Wire(UInt(params.numEntries.W)) val freeIndex = OHToUInt(freeOH) freeOH := ~(leftOR(~used) << 1) & ~used ioReservedIndex := freeIndex val validSet = WireDefault(0.U(params.numLists.W)) val validClr = WireDefault(0.U(params.numLists.W)) val usedSet = WireDefault(0.U(params.numEntries.W)) val usedClr = WireDefault(0.U(params.numEntries.W)) val dataIsPresentSet = WireDefault(0.U(params.numEntries.W)) val dataIsPresentClr = WireDefault(0.U(params.numEntries.W)) valid := (valid & ~validClr) | validSet used := (used & ~usedClr) | usedSet dataIsPresent := (dataIsPresent & ~dataIsPresentClr) | dataIsPresentSet /* Reservation logic signals */ val reserveTail = Wire(UInt(params.entryBits.W)) val reserveIsValid = Wire(Bool()) /* Response logic signals */ val responseIndex = Wire(UInt(params.entryBits.W)) val responseListIndex = Wire(UInt(params.listBits.W)) val responseHead = Wire(UInt(params.entryBits.W)) val responseTail = Wire(UInt(params.entryBits.W)) val nextResponseHead = Wire(UInt(params.entryBits.W)) val nextDataIsPresent = Wire(Bool()) val isResponseInOrder = Wire(Bool()) val isEndOfList = Wire(Bool()) val isLastBeat = Wire(Bool()) val isLastResponseBeat = Wire(Bool()) val isLastUnwindBeat = Wire(Bool()) /* Reservation logic */ reserveTail := tail.read(ioReserve.bits) reserveIsValid := valid(ioReserve.bits) ioReserve.ready := !used.andR // When we want to append-to and destroy the same linked list on the same cycle, we need to take special care that we // actually start a new list, rather than appending to a list that's about to disappear. 
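// Illustration of the corner case (example values, not part of the original file):
//   list 5 holds a single entry E, and on one cycle both
//     - ioDataOut fires E's last beat (isEndOfList && isLastBeat), and
//     - ioReserve fires for the same list index 5.
//   A plain append would execute 'next.write(reserveTail, freeIndex)' and chain the
//   new entry behind the just-retired E, leaving it unreachable. The
//   'appendToAndDestroyList' qualifier below forces 'head.write(ioReserve.bits, freeIndex)'
//   instead, starting a fresh list.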
val reserveResponseSameList = ioReserve.bits === responseListIndex val appendToAndDestroyList = ioReserve.fire && ioDataOut.fire && reserveResponseSameList && isEndOfList && isLastBeat when(ioReserve.fire) { validSet := UIntToOH(ioReserve.bits, params.numLists) usedSet := freeOH when(reserveIsValid && !appendToAndDestroyList) { next.write(reserveTail, freeIndex) }.otherwise { head.write(ioReserve.bits, freeIndex) } tail.write(ioReserve.bits, freeIndex) map.write(freeIndex, ioReserve.bits) } /* Response logic */ // The majority of the response logic (reading from and writing to the various RAMs) is common between the // response-from-IO case (ioResponse.fire) and the response-from-unwind case (unwindDataIsValid). // The read from the 'next' RAM should be performed at the address given by 'responseHead'. However, we only use the // 'nextResponseHead' signal when 'isResponseInOrder' is asserted (both in the response-from-IO and // response-from-unwind cases), which implies that 'responseHead' equals 'responseIndex'. 'responseHead' comes after // two back-to-back RAM reads, so indexing into the 'next' RAM with 'responseIndex' is much quicker. responseHead := head.read(responseListIndex) responseTail := tail.read(responseListIndex) nextResponseHead := next.read(responseIndex) nextDataIsPresent := dataIsPresent(nextResponseHead) // Note that when 'isEndOfList' is asserted, 'nextResponseHead' (and therefore 'nextDataIsPresent') is invalid, since // there isn't a next element in the linked list. isResponseInOrder := responseHead === responseIndex isEndOfList := responseHead === responseTail isLastResponseBeat := ioResponse.bits.count === ioResponse.bits.numBeats1 // When a response's last beat is sent to the output channel, mark it as completed. This can happen in two // situations: // 1. We receive an in-order response, which travels straight from 'ioResponse' to 'ioDataOut'. The 'data' SRAM // reservation was never needed. // 2. An entry is read out of the 'data' SRAM (within the unwind FSM). when(ioDataOut.fire && isLastBeat) { // Mark the reservation as no-longer-used. usedClr := UIntToOH(responseIndex, params.numEntries) // If the response is in-order, then we're popping an element from this linked list. when(isEndOfList) { // Once we pop the last element from a linked list, mark it as no-longer-present. validClr := UIntToOH(responseListIndex, params.numLists) }.otherwise { // Move the linked list's head pointer to the new head pointer. head.write(responseListIndex, nextResponseHead) } } // If we get an out-of-order response, then stash it in the 'data' SRAM for later unwinding. when(ioResponse.fire && !isResponseInOrder) { dataMemWriteEnable := true.B when(isLastResponseBeat) { dataIsPresentSet := UIntToOH(ioResponse.bits.index, params.numEntries) beats.write(ioResponse.bits.index, ioResponse.bits.numBeats1) } } // Use the 'ioResponse.bits.count' index (AKA the beat number) to select which 'data' SRAM to write to. 
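// Illustrative example (assumes params.numBeats = 4; values are examples only):
//   ioResponse.bits.count = 2 -> responseCountOH = 0b0100, so only dataMems(2) is
//   written this cycle, at row ioResponse.bits.index. Banking one single-ported
//   SRAM per beat lets a whole burst be stashed without a multi-ported memory.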
val responseCountOH = UIntToOH(ioResponse.bits.count, params.numBeats) (responseCountOH.asBools zip dataMems) foreach { case (select, seqMem) => when(select && dataMemWriteEnable) { seqMem.write(ioResponse.bits.index, ioResponse.bits.data) } } /* Response unwind logic */ // Unwind FSM state definitions val sIdle :: sUnwinding :: Nil = Enum(2) val unwindState = RegInit(sIdle) val busyUnwinding = unwindState === sUnwinding val startUnwind = Wire(Bool()) val stopUnwind = Wire(Bool()) when(startUnwind) { unwindState := sUnwinding }.elsewhen(stopUnwind) { unwindState := sIdle } assert(!(startUnwind && stopUnwind)) // Start the unwind FSM when there is an old out-of-order response stored in the 'data' SRAM that is now about to // become the next in-order response. As noted previously, when 'isEndOfList' is asserted, 'nextDataIsPresent' is // invalid. // // Note that since an in-order response from 'ioResponse' to 'ioDataOut' starts the unwind FSM, we don't have to // worry about overwriting the 'data' SRAM's output when we start the unwind FSM. startUnwind := ioResponse.fire && isResponseInOrder && isLastResponseBeat && !isEndOfList && nextDataIsPresent // Stop the unwind FSM when the output channel consumes the final beat of an element from the unwind FSM, and one of // two things happens: // 1. We're still waiting for the next in-order response for this list (!nextDataIsPresent) // 2. There are no more outstanding responses in this list (isEndOfList) // // Including 'busyUnwinding' ensures this is a single-cycle pulse, and it never fires while in-order transactions are // passing from 'ioResponse' to 'ioDataOut'. stopUnwind := busyUnwinding && ioDataOut.fire && isLastUnwindBeat && (!nextDataIsPresent || isEndOfList) val isUnwindBurstOver = Wire(Bool()) val startNewBurst = startUnwind || (isUnwindBurstOver && dataMemReadEnable) // Track the number of beats left to unwind for each list entry. At the start of a new burst, we flop the number of // beats in this burst (minus 1) into 'unwindBeats1', and we reset the 'beatCounter' counter. With each beat, we // increment 'beatCounter' until it reaches 'unwindBeats1'. val unwindBeats1 = Reg(UInt(params.beatBits.W)) val nextBeatCounter = Wire(UInt(params.beatBits.W)) val beatCounter = RegNext(nextBeatCounter) isUnwindBurstOver := beatCounter === unwindBeats1 when(startNewBurst) { unwindBeats1 := beats.read(nextResponseHead) nextBeatCounter := 0.U }.elsewhen(dataMemReadEnable) { nextBeatCounter := beatCounter + 1.U }.otherwise { nextBeatCounter := beatCounter } // When unwinding, feed the next linked-list head pointer (read out of the 'next' RAM) back so we can unwind the next // entry in this linked list. Only update the pointer when we're actually moving to the next 'data' SRAM entry (which // happens at the start of reading a new stored burst). val unwindResponseIndex = RegEnable(nextResponseHead, startNewBurst) responseIndex := Mux(busyUnwinding, unwindResponseIndex, ioResponse.bits.index) // Hold 'nextResponseHead' static while we're in the middle of unwinding a multi-beat burst entry. We don't want the // SRAM read address to shift while reading beats from a burst. Note that this is identical to 'nextResponseHead // holdUnless startNewBurst', but 'unwindResponseIndex' already implements the 'RegEnable' signal in 'holdUnless'. val unwindReadAddress = Mux(startNewBurst, nextResponseHead, unwindResponseIndex) // The 'data' SRAM's output is valid if we read from the SRAM on the previous cycle. 
The SRAM's output stays valid // until it is consumed by the output channel (and if we don't read from the SRAM again on that same cycle). val unwindDataIsValid = RegInit(false.B) when(dataMemReadEnable) { unwindDataIsValid := true.B }.elsewhen(ioDataOut.fire) { unwindDataIsValid := false.B } isLastUnwindBeat := isUnwindBurstOver && unwindDataIsValid // Indicates if this is the last beat for both 'ioResponse'-to-'ioDataOut' and unwind-to-'ioDataOut' beats. isLastBeat := Mux(busyUnwinding, isLastUnwindBeat, isLastResponseBeat) // Select which SRAM to read from based on the beat counter. val dataOutputVec = Wire(Vec(params.numBeats, gen)) val nextBeatCounterOH = UIntToOH(nextBeatCounter, params.numBeats) (nextBeatCounterOH.asBools zip dataMems).zipWithIndex foreach { case ((select, seqMem), i) => dataOutputVec(i) := seqMem.read(unwindReadAddress, select && dataMemReadEnable) } // Select the current 'data' SRAM output beat, and save the output in a register in case we're being back-pressured // by 'ioDataOut'. This implements the functionality of 'readAndHold', but only on the single SRAM we're reading // from. val dataOutput = dataOutputVec(beatCounter) holdUnless RegNext(dataMemReadEnable) // Mark 'data' burst entries as no-longer-present as they get read out of the SRAM. when(dataMemReadEnable) { dataIsPresentClr := UIntToOH(unwindReadAddress, params.numEntries) } // As noted above, when starting the unwind FSM, we know the 'data' SRAM's output isn't valid, so it's safe to issue // a read command. Otherwise, only issue an SRAM read when the next 'unwindState' is 'sUnwinding', and if we know // we're not going to overwrite the SRAM's current output (the SRAM output is already valid, and it's not going to be // consumed by the output channel). val dontReadFromDataMem = unwindDataIsValid && !ioDataOut.ready dataMemReadEnable := startUnwind || (busyUnwinding && !stopUnwind && !dontReadFromDataMem) // While unwinding, prevent new reservations from overwriting the current 'map' entry that we're using. We need // 'responseListIndex' to be coherent for the entire unwind process. val rawResponseListIndex = map.read(responseIndex) val unwindResponseListIndex = RegEnable(rawResponseListIndex, startNewBurst) responseListIndex := Mux(busyUnwinding, unwindResponseListIndex, rawResponseListIndex) // Accept responses either when they can be passed through to the output channel, or if they're out-of-order and are // just going to be stashed in the 'data' SRAM. Never accept a response payload when we're busy unwinding, since that // could result in reading from and writing to the 'data' SRAM in the same cycle, and we want that SRAM to be // single-ported. ioResponse.ready := (ioDataOut.ready || !isResponseInOrder) && !busyUnwinding // Either pass an in-order response to the output channel, or data read from the unwind FSM. ioDataOut.valid := Mux(busyUnwinding, unwindDataIsValid, ioResponse.valid && isResponseInOrder) ioDataOut.bits.listIndex := responseListIndex ioDataOut.bits.payload := Mux(busyUnwinding, dataOutput, ioResponse.bits.data) // It's an error to get a response that isn't associated with a valid linked list. when(ioResponse.fire || unwindDataIsValid) { assert( valid(responseListIndex), "No linked list exists at index %d, mapped from %d", responseListIndex, responseIndex ) } when(busyUnwinding && dataMemReadEnable) { assert(isResponseInOrder, "Unwind FSM must read entries from SRAM in order") } } /** Specialized version of [[ReservableListBuffer]] for the case of numEntries == 1. 
* * Much of the complex logic in [[ReservableListBuffer]] can disappear in this case. For instance, we don't have to * reorder any responses, or store any linked lists. */ class PassthroughListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends BaseReservableListBuffer(gen, params) { require(params.numEntries == 1, s"PassthroughListBuffer is only valid when 'numEntries' (${params.numEntries}) is 1") val used = RegInit(0.U(params.numEntries.W)) val map = Mem(params.numEntries, UInt(params.listBits.W)) val usedSet = WireDefault(0.U(params.numEntries.W)) val usedClr = WireDefault(0.U(params.numEntries.W)) used := (used & ~usedClr) | usedSet ioReserve.ready := used === 0.U // Store which list index was reserved, we need to return this value when we get a response. when(ioReserve.fire) { usedSet := 1.U map.write(0.U, ioReserve.bits) } // There's only one valid linked list entry, which is at index 0. ioReservedIndex := 0.U val isLastResponseBeat = ioResponse.bits.count === ioResponse.bits.numBeats1 // Mark the linked list as empty when we get the last beat in a response. // Note that 'ioResponse.fire === ioDataOut.fire'. when(ioResponse.fire && isLastResponseBeat) { usedClr := 1.U } // Always pass the response data straight through, since we never need to reorder the response data. ioDataOut.bits.listIndex := map.read(0.U) ioDataOut.bits.payload := ioResponse.bits.data ioDataOut.valid := ioResponse.valid ioResponse.ready := ioDataOut.ready } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? 
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => 
a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? 
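// Summary of the lookup below, for reference (illustrative, not exhaustive):
//   PutFullData / PutPartialData / ArithmeticData / LogicalData -> true
//   Get, Hint(PREFETCH_READ)                                    -> false
//   Hint(PREFETCH_WRITE)                                        -> true
//   AcquireBlock / AcquirePerm: NtoT or BtoT -> true, NtoB -> false
// e.g. (hypothetical names): val aNeedsT = edge.needT(tlBundle.a.bits)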
def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size 
:= lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = 
Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address 
:= toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def 
Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := 
mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } } File Arbiter.scala: // See LICENSE.SiFive for license details. 
package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ object TLArbiter { // (valids, select) => readys type Policy = (Integer, UInt, Bool) => UInt val lowestIndexFirst: Policy = (width, valids, select) => ~(leftOR(valids) << 1)(width-1, 0) val highestIndexFirst: Policy = (width, valids, select) => ~((rightOR(valids) >> 1).pad(width)) val roundRobin: Policy = (width, valids, select) => if (width == 1) 1.U(1.W) else { val valid = valids(width-1, 0) assert (valid === valids) val mask = RegInit(((BigInt(1) << width)-1).U(width-1,0)) val filter = Cat(valid & ~mask, valid) val unready = (rightOR(filter, width*2, width) >> 1) | (mask << width) val readys = ~((unready >> width) & unready(width-1, 0)) when (select && valid.orR) { mask := leftOR(readys & valid, width) } readys(width-1, 0) } def lowestFromSeq[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: Seq[DecoupledIO[T]]): Unit = { apply(lowestIndexFirst)(sink, sources.map(s => (edge.numBeats1(s.bits), s)):_*) } def lowest[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: DecoupledIO[T]*): Unit = { apply(lowestIndexFirst)(sink, sources.toList.map(s => (edge.numBeats1(s.bits), s)):_*) } def highest[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: DecoupledIO[T]*): Unit = { apply(highestIndexFirst)(sink, sources.toList.map(s => (edge.numBeats1(s.bits), s)):_*) } def robin[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: DecoupledIO[T]*): Unit = { apply(roundRobin)(sink, sources.toList.map(s => (edge.numBeats1(s.bits), s)):_*) } def apply[T <: Data](policy: Policy)(sink: DecoupledIO[T], sources: (UInt, DecoupledIO[T])*): Unit = { if (sources.isEmpty) { sink.bits := DontCare } else if (sources.size == 1) { sink :<>= sources.head._2 } else { val pairs = sources.toList val beatsIn = pairs.map(_._1) val sourcesIn = pairs.map(_._2) // The number of beats which remain to be sent val beatsLeft = RegInit(0.U) val idle = beatsLeft === 0.U val latch = idle && sink.ready // winner (if any) claims sink // Who wants access to the sink? val valids = sourcesIn.map(_.valid) // Arbitrate amongst the requests val readys = VecInit(policy(valids.size, Cat(valids.reverse), latch).asBools) // Which request wins arbitration? 
val winner = VecInit((readys zip valids) map { case (r,v) => r&&v }) // Confirm the policy works properly require (readys.size == valids.size) // Never two winners val prefixOR = winner.scanLeft(false.B)(_||_).init assert((prefixOR zip winner) map { case (p,w) => !p || !w } reduce {_ && _}) // If there was any request, there is a winner assert (!valids.reduce(_||_) || winner.reduce(_||_)) // Track remaining beats val maskedBeats = (winner zip beatsIn) map { case (w,b) => Mux(w, b, 0.U) } val initBeats = maskedBeats.reduce(_ | _) // no winner => 0 beats beatsLeft := Mux(latch, initBeats, beatsLeft - sink.fire) // The one-hot source granted access in the previous cycle val state = RegInit(VecInit(Seq.fill(sources.size)(false.B))) val muxState = Mux(idle, winner, state) state := muxState val allowed = Mux(idle, readys, state) (sourcesIn zip allowed) foreach { case (s, r) => s.ready := sink.ready && r } sink.valid := Mux(idle, valids.reduce(_||_), Mux1H(state, valids)) sink.bits :<= Mux1H(muxState, sourcesIn.map(_.bits)) } } } // Synthesizable unit tests import freechips.rocketchip.unittest._ abstract class DecoupledArbiterTest( policy: TLArbiter.Policy, txns: Int, timeout: Int, val numSources: Int, beatsLeftFromIdx: Int => UInt) (implicit p: Parameters) extends UnitTest(timeout) { val sources = Wire(Vec(numSources, DecoupledIO(UInt(log2Ceil(numSources).W)))) dontTouch(sources.suggestName("sources")) val sink = Wire(DecoupledIO(UInt(log2Ceil(numSources).W))) dontTouch(sink.suggestName("sink")) val count = RegInit(0.U(log2Ceil(txns).W)) val lfsr = LFSR(16, true.B) sources.zipWithIndex.map { case (z, i) => z.bits := i.U } TLArbiter(policy)(sink, sources.zipWithIndex.map { case (z, i) => (beatsLeftFromIdx(i), z) }:_*) count := count + 1.U io.finished := count >= txns.U } /** This tests that when a specific pattern of source valids are driven, * a new index from amongst that pattern is always selected, * unless one of those sources takes multiple beats, * in which case the same index should be selected until the arbiter goes idle. */ class TLDecoupledArbiterRobinTest(txns: Int = 128, timeout: Int = 500000, print: Boolean = false) (implicit p: Parameters) extends DecoupledArbiterTest(TLArbiter.roundRobin, txns, timeout, 6, i => i.U) { val lastWinner = RegInit((numSources+1).U) val beatsLeft = RegInit(0.U(log2Ceil(numSources).W)) val first = lastWinner > numSources.U val valid = lfsr(0) val ready = lfsr(15) sink.ready := ready sources.zipWithIndex.map { // pattern: every even-indexed valid is driven the same random way case (s, i) => s.valid := (if (i % 2 == 1) false.B else valid) } when (sink.fire) { if (print) { printf("TestRobin: %d\n", sink.bits) } when (beatsLeft === 0.U) { assert(lastWinner =/= sink.bits, "Round robin did not pick a new idx despite one being valid.") lastWinner := sink.bits beatsLeft := sink.bits } .otherwise { assert(lastWinner === sink.bits, "Round robin did not pick the same index over multiple beats") beatsLeft := beatsLeft - 1.U } } if (print) { when (!sink.fire) { printf("TestRobin: idle (%d %d)\n", valid, ready) } } } /** This tests that the lowest index is always selected across random single cycle transactions. 
*/ class TLDecoupledArbiterLowestTest(txns: Int = 128, timeout: Int = 500000)(implicit p: Parameters) extends DecoupledArbiterTest(TLArbiter.lowestIndexFirst, txns, timeout, 15, _ => 0.U) { def assertLowest(id: Int): Unit = { when (sources(id).valid) { assert((numSources-1 until id by -1).map(!sources(_).fire).foldLeft(true.B)(_&&_), s"$id was valid but a higher valid source was granted ready.") } } sources.zipWithIndex.map { case (s, i) => s.valid := lfsr(i) } sink.ready := lfsr(15) when (sink.fire) { (0 until numSources).foreach(assertLowest(_)) } } /** This tests that the highest index is always selected across random single cycle transactions. */ class TLDecoupledArbiterHighestTest(txns: Int = 128, timeout: Int = 500000)(implicit p: Parameters) extends DecoupledArbiterTest(TLArbiter.highestIndexFirst, txns, timeout, 15, _ => 0.U) { def assertHighest(id: Int): Unit = { when (sources(id).valid) { assert((0 until id).map(!sources(_).fire).foldLeft(true.B)(_&&_), s"$id was valid but a lower valid source was granted ready.") } } sources.zipWithIndex.map { case (s, i) => s.valid := lfsr(i) } sink.ready := lfsr(15) when (sink.fire) { (0 until numSources).foreach(assertHighest(_)) } }
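A minimal usage sketch of the arbiter defined above, for orientation only: it merges two A-channel request streams into one sink with the lowest-index-first policy. The module name MergeTwoA, its port names, and the in-scope TLEdgeOut parameter are assumptions for illustration, not part of the files above.

import chisel3._
import chisel3.util._
import freechips.rocketchip.tilelink._

class MergeTwoA(edge: TLEdgeOut) extends Module {
  val io = IO(new Bundle {
    val reqA   = Flipped(Decoupled(new TLBundleA(edge.bundle)))
    val reqB   = Flipped(Decoupled(new TLBundleA(edge.bundle)))
    val merged = Decoupled(new TLBundleA(edge.bundle))
  })
  // lowestIndexFirst: reqA wins whenever both sources are valid; once a
  // multi-beat message wins, the arbiter holds the grant (beatsLeft/state)
  // until its last beat has fired.
  TLArbiter.lowest(edge, io.merged, io.reqA, io.reqB)
}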
module UnsafeAXI4ToTL( // @[UnsafeAXI4ToTL.scala:79:25] input clock, // @[UnsafeAXI4ToTL.scala:79:25] input reset, // @[UnsafeAXI4ToTL.scala:79:25] output auto_in_aw_ready, // @[LazyModuleImp.scala:107:25] input auto_in_aw_valid, // @[LazyModuleImp.scala:107:25] input [3:0] auto_in_aw_bits_id, // @[LazyModuleImp.scala:107:25] input [31:0] auto_in_aw_bits_addr, // @[LazyModuleImp.scala:107:25] input [7:0] auto_in_aw_bits_len, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_aw_bits_size, // @[LazyModuleImp.scala:107:25] input [1:0] auto_in_aw_bits_burst, // @[LazyModuleImp.scala:107:25] input auto_in_aw_bits_lock, // @[LazyModuleImp.scala:107:25] input [3:0] auto_in_aw_bits_cache, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_aw_bits_prot, // @[LazyModuleImp.scala:107:25] input [3:0] auto_in_aw_bits_qos, // @[LazyModuleImp.scala:107:25] output auto_in_w_ready, // @[LazyModuleImp.scala:107:25] input auto_in_w_valid, // @[LazyModuleImp.scala:107:25] input [63:0] auto_in_w_bits_data, // @[LazyModuleImp.scala:107:25] input [7:0] auto_in_w_bits_strb, // @[LazyModuleImp.scala:107:25] input auto_in_w_bits_last, // @[LazyModuleImp.scala:107:25] input auto_in_b_ready, // @[LazyModuleImp.scala:107:25] output auto_in_b_valid, // @[LazyModuleImp.scala:107:25] output [3:0] auto_in_b_bits_id, // @[LazyModuleImp.scala:107:25] output [1:0] auto_in_b_bits_resp, // @[LazyModuleImp.scala:107:25] output auto_in_ar_ready, // @[LazyModuleImp.scala:107:25] input auto_in_ar_valid, // @[LazyModuleImp.scala:107:25] input [3:0] auto_in_ar_bits_id, // @[LazyModuleImp.scala:107:25] input [31:0] auto_in_ar_bits_addr, // @[LazyModuleImp.scala:107:25] input [7:0] auto_in_ar_bits_len, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_ar_bits_size, // @[LazyModuleImp.scala:107:25] input [1:0] auto_in_ar_bits_burst, // @[LazyModuleImp.scala:107:25] input auto_in_ar_bits_lock, // @[LazyModuleImp.scala:107:25] input [3:0] auto_in_ar_bits_cache, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_ar_bits_prot, // @[LazyModuleImp.scala:107:25] input [3:0] auto_in_ar_bits_qos, // @[LazyModuleImp.scala:107:25] input auto_in_r_ready, // @[LazyModuleImp.scala:107:25] output auto_in_r_valid, // @[LazyModuleImp.scala:107:25] output [3:0] auto_in_r_bits_id, // @[LazyModuleImp.scala:107:25] output [63:0] auto_in_r_bits_data, // @[LazyModuleImp.scala:107:25] output [1:0] auto_in_r_bits_resp, // @[LazyModuleImp.scala:107:25] output auto_in_r_bits_last, // @[LazyModuleImp.scala:107:25] input auto_out_a_ready, // @[LazyModuleImp.scala:107:25] output auto_out_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [3:0] auto_out_a_bits_size, // @[LazyModuleImp.scala:107:25] output [5:0] auto_out_a_bits_source, // @[LazyModuleImp.scala:107:25] output [31:0] auto_out_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_out_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_out_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_out_d_ready, // @[LazyModuleImp.scala:107:25] input auto_out_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [1:0] auto_out_d_bits_param, // @[LazyModuleImp.scala:107:25] input [3:0] auto_out_d_bits_size, // @[LazyModuleImp.scala:107:25] input [5:0] auto_out_d_bits_source, // @[LazyModuleImp.scala:107:25] input [2:0] auto_out_d_bits_sink, // @[LazyModuleImp.scala:107:25] input auto_out_d_bits_denied, // @[LazyModuleImp.scala:107:25] input 
[63:0] auto_out_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_out_d_bits_corrupt // @[LazyModuleImp.scala:107:25] ); wire _listBuffer_ioReserve_ready; // @[UnsafeAXI4ToTL.scala:108:15] wire [4:0] _listBuffer_ioReservedIndex; // @[UnsafeAXI4ToTL.scala:108:15] wire _listBuffer_ioResponse_ready; // @[UnsafeAXI4ToTL.scala:108:15] wire auto_in_aw_valid_0 = auto_in_aw_valid; // @[UnsafeAXI4ToTL.scala:79:25] wire [3:0] auto_in_aw_bits_id_0 = auto_in_aw_bits_id; // @[UnsafeAXI4ToTL.scala:79:25] wire [31:0] auto_in_aw_bits_addr_0 = auto_in_aw_bits_addr; // @[UnsafeAXI4ToTL.scala:79:25] wire [7:0] auto_in_aw_bits_len_0 = auto_in_aw_bits_len; // @[UnsafeAXI4ToTL.scala:79:25] wire [2:0] auto_in_aw_bits_size_0 = auto_in_aw_bits_size; // @[UnsafeAXI4ToTL.scala:79:25] wire [1:0] auto_in_aw_bits_burst_0 = auto_in_aw_bits_burst; // @[UnsafeAXI4ToTL.scala:79:25] wire auto_in_aw_bits_lock_0 = auto_in_aw_bits_lock; // @[UnsafeAXI4ToTL.scala:79:25] wire [3:0] auto_in_aw_bits_cache_0 = auto_in_aw_bits_cache; // @[UnsafeAXI4ToTL.scala:79:25] wire [2:0] auto_in_aw_bits_prot_0 = auto_in_aw_bits_prot; // @[UnsafeAXI4ToTL.scala:79:25] wire [3:0] auto_in_aw_bits_qos_0 = auto_in_aw_bits_qos; // @[UnsafeAXI4ToTL.scala:79:25] wire auto_in_w_valid_0 = auto_in_w_valid; // @[UnsafeAXI4ToTL.scala:79:25] wire [63:0] auto_in_w_bits_data_0 = auto_in_w_bits_data; // @[UnsafeAXI4ToTL.scala:79:25] wire [7:0] auto_in_w_bits_strb_0 = auto_in_w_bits_strb; // @[UnsafeAXI4ToTL.scala:79:25] wire auto_in_w_bits_last_0 = auto_in_w_bits_last; // @[UnsafeAXI4ToTL.scala:79:25] wire auto_in_b_ready_0 = auto_in_b_ready; // @[UnsafeAXI4ToTL.scala:79:25] wire auto_in_ar_valid_0 = auto_in_ar_valid; // @[UnsafeAXI4ToTL.scala:79:25] wire [3:0] auto_in_ar_bits_id_0 = auto_in_ar_bits_id; // @[UnsafeAXI4ToTL.scala:79:25] wire [31:0] auto_in_ar_bits_addr_0 = auto_in_ar_bits_addr; // @[UnsafeAXI4ToTL.scala:79:25] wire [7:0] auto_in_ar_bits_len_0 = auto_in_ar_bits_len; // @[UnsafeAXI4ToTL.scala:79:25] wire [2:0] auto_in_ar_bits_size_0 = auto_in_ar_bits_size; // @[UnsafeAXI4ToTL.scala:79:25] wire [1:0] auto_in_ar_bits_burst_0 = auto_in_ar_bits_burst; // @[UnsafeAXI4ToTL.scala:79:25] wire auto_in_ar_bits_lock_0 = auto_in_ar_bits_lock; // @[UnsafeAXI4ToTL.scala:79:25] wire [3:0] auto_in_ar_bits_cache_0 = auto_in_ar_bits_cache; // @[UnsafeAXI4ToTL.scala:79:25] wire [2:0] auto_in_ar_bits_prot_0 = auto_in_ar_bits_prot; // @[UnsafeAXI4ToTL.scala:79:25] wire [3:0] auto_in_ar_bits_qos_0 = auto_in_ar_bits_qos; // @[UnsafeAXI4ToTL.scala:79:25] wire auto_in_r_ready_0 = auto_in_r_ready; // @[UnsafeAXI4ToTL.scala:79:25] wire auto_out_a_ready_0 = auto_out_a_ready; // @[UnsafeAXI4ToTL.scala:79:25] wire auto_out_d_valid_0 = auto_out_d_valid; // @[UnsafeAXI4ToTL.scala:79:25] wire [2:0] auto_out_d_bits_opcode_0 = auto_out_d_bits_opcode; // @[UnsafeAXI4ToTL.scala:79:25] wire [1:0] auto_out_d_bits_param_0 = auto_out_d_bits_param; // @[UnsafeAXI4ToTL.scala:79:25] wire [3:0] auto_out_d_bits_size_0 = auto_out_d_bits_size; // @[UnsafeAXI4ToTL.scala:79:25] wire [5:0] auto_out_d_bits_source_0 = auto_out_d_bits_source; // @[UnsafeAXI4ToTL.scala:79:25] wire [2:0] auto_out_d_bits_sink_0 = auto_out_d_bits_sink; // @[UnsafeAXI4ToTL.scala:79:25] wire auto_out_d_bits_denied_0 = auto_out_d_bits_denied; // @[UnsafeAXI4ToTL.scala:79:25] wire [63:0] auto_out_d_bits_data_0 = auto_out_d_bits_data; // @[UnsafeAXI4ToTL.scala:79:25] wire auto_out_d_bits_corrupt_0 = auto_out_d_bits_corrupt; // @[UnsafeAXI4ToTL.scala:79:25] wire _readys_T_2 = reset; // @[Arbiter.scala:22:12] wire _rOk_T 
= 1'h1; // @[Parameters.scala:92:28] wire _rOk_T_10 = 1'h1; // @[Parameters.scala:92:28] wire _rOut_bits_legal_T = 1'h1; // @[Parameters.scala:92:28] wire _rOut_bits_legal_T_10 = 1'h1; // @[Parameters.scala:92:28] wire _wOk_T = 1'h1; // @[Parameters.scala:92:28] wire _wOk_T_10 = 1'h1; // @[Parameters.scala:92:28] wire _wOut_bits_legal_T = 1'h1; // @[Parameters.scala:92:28] wire _wOut_bits_legal_T_10 = 1'h1; // @[Parameters.scala:92:28] wire beats1_opdata = 1'h1; // @[Edges.scala:92:28] wire [2:0] auto_out_a_bits_param = 3'h0; // @[UnsafeAXI4ToTL.scala:79:25] wire [2:0] nodeOut_a_bits_param = 3'h0; // @[MixedNode.scala:542:17] wire [2:0] rOut_bits_param = 3'h0; // @[UnsafeAXI4ToTL.scala:120:25] wire [2:0] rOut_bits_a_param = 3'h0; // @[Edges.scala:460:17] wire [2:0] wOut_bits_param = 3'h0; // @[UnsafeAXI4ToTL.scala:182:25] wire [2:0] wOut_bits_a_param = 3'h0; // @[Edges.scala:500:17] wire [2:0] _nodeOut_a_bits_WIRE_param = 3'h0; // @[Mux.scala:30:73] wire [2:0] _nodeOut_a_bits_T_18 = 3'h0; // @[Mux.scala:30:73] wire [2:0] _nodeOut_a_bits_T_19 = 3'h0; // @[Mux.scala:30:73] wire [2:0] _nodeOut_a_bits_T_20 = 3'h0; // @[Mux.scala:30:73] wire [2:0] _nodeOut_a_bits_WIRE_9 = 3'h0; // @[Mux.scala:30:73] wire auto_out_a_bits_corrupt = 1'h0; // @[UnsafeAXI4ToTL.scala:79:25] wire nodeOut_a_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17] wire rOut_bits_corrupt = 1'h0; // @[UnsafeAXI4ToTL.scala:120:25] wire rOut_bits_a_corrupt = 1'h0; // @[Edges.scala:460:17] wire wOut_bits_corrupt = 1'h0; // @[UnsafeAXI4ToTL.scala:182:25] wire _wOk_T_62 = 1'h0; // @[Parameters.scala:684:29] wire _wOk_T_68 = 1'h0; // @[Parameters.scala:684:54] wire _wOut_bits_legal_T_62 = 1'h0; // @[Parameters.scala:684:29] wire _wOut_bits_legal_T_68 = 1'h0; // @[Parameters.scala:684:54] wire wOut_bits_a_corrupt = 1'h0; // @[Edges.scala:500:17] wire maskedBeats_0 = 1'h0; // @[Arbiter.scala:82:69] wire _state_WIRE_0 = 1'h0; // @[Arbiter.scala:88:34] wire _state_WIRE_1 = 1'h0; // @[Arbiter.scala:88:34] wire _nodeOut_a_bits_WIRE_corrupt = 1'h0; // @[Mux.scala:30:73] wire _nodeOut_a_bits_T = 1'h0; // @[Mux.scala:30:73] wire _nodeOut_a_bits_T_1 = 1'h0; // @[Mux.scala:30:73] wire _nodeOut_a_bits_T_2 = 1'h0; // @[Mux.scala:30:73] wire _nodeOut_a_bits_WIRE_1 = 1'h0; // @[Mux.scala:30:73] wire _beats1_opdata_T = 1'h0; // @[Edges.scala:92:37] wire [63:0] rOut_bits_data = 64'h0; // @[UnsafeAXI4ToTL.scala:120:25] wire [63:0] rOut_bits_a_data = 64'h0; // @[Edges.scala:460:17] wire [63:0] _nodeOut_a_bits_T_3 = 64'h0; // @[Mux.scala:30:73] wire [2:0] wOut_bits_opcode = 3'h1; // @[UnsafeAXI4ToTL.scala:182:25] wire [2:0] wOut_bits_a_opcode = 3'h1; // @[Edges.scala:500:17] wire [2:0] rOut_bits_opcode = 3'h4; // @[UnsafeAXI4ToTL.scala:120:25] wire [2:0] rOut_bits_a_opcode = 3'h4; // @[Edges.scala:460:17] wire nodeIn_aw_ready; // @[MixedNode.scala:551:17] wire nodeIn_aw_valid = auto_in_aw_valid_0; // @[MixedNode.scala:551:17] wire [3:0] nodeIn_aw_bits_id = auto_in_aw_bits_id_0; // @[MixedNode.scala:551:17] wire [31:0] nodeIn_aw_bits_addr = auto_in_aw_bits_addr_0; // @[MixedNode.scala:551:17] wire [7:0] nodeIn_aw_bits_len = auto_in_aw_bits_len_0; // @[MixedNode.scala:551:17] wire [2:0] nodeIn_aw_bits_size = auto_in_aw_bits_size_0; // @[MixedNode.scala:551:17] wire [1:0] nodeIn_aw_bits_burst = auto_in_aw_bits_burst_0; // @[MixedNode.scala:551:17] wire nodeIn_aw_bits_lock = auto_in_aw_bits_lock_0; // @[MixedNode.scala:551:17] wire [3:0] nodeIn_aw_bits_cache = auto_in_aw_bits_cache_0; // @[MixedNode.scala:551:17] wire [2:0] nodeIn_aw_bits_prot = 
auto_in_aw_bits_prot_0; // @[MixedNode.scala:551:17] wire [3:0] nodeIn_aw_bits_qos = auto_in_aw_bits_qos_0; // @[MixedNode.scala:551:17] wire nodeIn_w_ready; // @[MixedNode.scala:551:17] wire nodeIn_w_valid = auto_in_w_valid_0; // @[MixedNode.scala:551:17] wire [63:0] nodeIn_w_bits_data = auto_in_w_bits_data_0; // @[MixedNode.scala:551:17] wire [7:0] nodeIn_w_bits_strb = auto_in_w_bits_strb_0; // @[MixedNode.scala:551:17] wire nodeIn_w_bits_last = auto_in_w_bits_last_0; // @[MixedNode.scala:551:17] wire nodeIn_b_ready = auto_in_b_ready_0; // @[MixedNode.scala:551:17] wire nodeIn_b_valid; // @[MixedNode.scala:551:17] wire [3:0] nodeIn_b_bits_id; // @[MixedNode.scala:551:17] wire [1:0] nodeIn_b_bits_resp; // @[MixedNode.scala:551:17] wire nodeIn_ar_ready; // @[MixedNode.scala:551:17] wire nodeIn_ar_valid = auto_in_ar_valid_0; // @[MixedNode.scala:551:17] wire [3:0] nodeIn_ar_bits_id = auto_in_ar_bits_id_0; // @[MixedNode.scala:551:17] wire [31:0] nodeIn_ar_bits_addr = auto_in_ar_bits_addr_0; // @[MixedNode.scala:551:17] wire [7:0] nodeIn_ar_bits_len = auto_in_ar_bits_len_0; // @[MixedNode.scala:551:17] wire [2:0] nodeIn_ar_bits_size = auto_in_ar_bits_size_0; // @[MixedNode.scala:551:17] wire [1:0] nodeIn_ar_bits_burst = auto_in_ar_bits_burst_0; // @[MixedNode.scala:551:17] wire nodeIn_ar_bits_lock = auto_in_ar_bits_lock_0; // @[MixedNode.scala:551:17] wire [3:0] nodeIn_ar_bits_cache = auto_in_ar_bits_cache_0; // @[MixedNode.scala:551:17] wire [2:0] nodeIn_ar_bits_prot = auto_in_ar_bits_prot_0; // @[MixedNode.scala:551:17] wire [3:0] nodeIn_ar_bits_qos = auto_in_ar_bits_qos_0; // @[MixedNode.scala:551:17] wire nodeIn_r_ready = auto_in_r_ready_0; // @[MixedNode.scala:551:17] wire nodeIn_r_valid; // @[MixedNode.scala:551:17] wire [3:0] nodeIn_r_bits_id; // @[MixedNode.scala:551:17] wire [63:0] nodeIn_r_bits_data; // @[MixedNode.scala:551:17] wire [1:0] nodeIn_r_bits_resp; // @[MixedNode.scala:551:17] wire nodeIn_r_bits_last; // @[MixedNode.scala:551:17] wire nodeOut_a_ready = auto_out_a_ready_0; // @[MixedNode.scala:542:17] wire nodeOut_a_valid; // @[MixedNode.scala:542:17] wire [2:0] nodeOut_a_bits_opcode; // @[MixedNode.scala:542:17] wire [3:0] nodeOut_a_bits_size; // @[MixedNode.scala:542:17] wire [5:0] nodeOut_a_bits_source; // @[MixedNode.scala:542:17] wire [31:0] nodeOut_a_bits_address; // @[MixedNode.scala:542:17] wire [7:0] nodeOut_a_bits_mask; // @[MixedNode.scala:542:17] wire [63:0] nodeOut_a_bits_data; // @[MixedNode.scala:542:17] wire nodeOut_d_ready; // @[MixedNode.scala:542:17] wire nodeOut_d_valid = auto_out_d_valid_0; // @[MixedNode.scala:542:17] wire [2:0] nodeOut_d_bits_opcode = auto_out_d_bits_opcode_0; // @[MixedNode.scala:542:17] wire [1:0] nodeOut_d_bits_param = auto_out_d_bits_param_0; // @[MixedNode.scala:542:17] wire [3:0] nodeOut_d_bits_size = auto_out_d_bits_size_0; // @[MixedNode.scala:542:17] wire [5:0] nodeOut_d_bits_source = auto_out_d_bits_source_0; // @[MixedNode.scala:542:17] wire [2:0] nodeOut_d_bits_sink = auto_out_d_bits_sink_0; // @[MixedNode.scala:542:17] wire nodeOut_d_bits_denied = auto_out_d_bits_denied_0; // @[MixedNode.scala:542:17] wire [63:0] nodeOut_d_bits_data = auto_out_d_bits_data_0; // @[MixedNode.scala:542:17] wire nodeOut_d_bits_corrupt = auto_out_d_bits_corrupt_0; // @[MixedNode.scala:542:17] wire auto_in_aw_ready_0; // @[UnsafeAXI4ToTL.scala:79:25] wire auto_in_w_ready_0; // @[UnsafeAXI4ToTL.scala:79:25] wire [3:0] auto_in_b_bits_id_0; // @[UnsafeAXI4ToTL.scala:79:25] wire [1:0] auto_in_b_bits_resp_0; // @[UnsafeAXI4ToTL.scala:79:25] wire 
auto_in_b_valid_0; // @[UnsafeAXI4ToTL.scala:79:25] wire auto_in_ar_ready_0; // @[UnsafeAXI4ToTL.scala:79:25] wire [3:0] auto_in_r_bits_id_0; // @[UnsafeAXI4ToTL.scala:79:25] wire [63:0] auto_in_r_bits_data_0; // @[UnsafeAXI4ToTL.scala:79:25] wire [1:0] auto_in_r_bits_resp_0; // @[UnsafeAXI4ToTL.scala:79:25] wire auto_in_r_bits_last_0; // @[UnsafeAXI4ToTL.scala:79:25] wire auto_in_r_valid_0; // @[UnsafeAXI4ToTL.scala:79:25] wire [2:0] auto_out_a_bits_opcode_0; // @[UnsafeAXI4ToTL.scala:79:25] wire [3:0] auto_out_a_bits_size_0; // @[UnsafeAXI4ToTL.scala:79:25] wire [5:0] auto_out_a_bits_source_0; // @[UnsafeAXI4ToTL.scala:79:25] wire [31:0] auto_out_a_bits_address_0; // @[UnsafeAXI4ToTL.scala:79:25] wire [7:0] auto_out_a_bits_mask_0; // @[UnsafeAXI4ToTL.scala:79:25] wire [63:0] auto_out_a_bits_data_0; // @[UnsafeAXI4ToTL.scala:79:25] wire auto_out_a_valid_0; // @[UnsafeAXI4ToTL.scala:79:25] wire auto_out_d_ready_0; // @[UnsafeAXI4ToTL.scala:79:25] wire _nodeIn_aw_ready_T_2; // @[UnsafeAXI4ToTL.scala:197:65] assign auto_in_aw_ready_0 = nodeIn_aw_ready; // @[MixedNode.scala:551:17] wire [31:0] _wOk_T_14 = nodeIn_aw_bits_addr; // @[Parameters.scala:137:31] wire _nodeIn_w_ready_T_1; // @[UnsafeAXI4ToTL.scala:198:48] assign auto_in_w_ready_0 = nodeIn_w_ready; // @[MixedNode.scala:551:17] wire [63:0] wOut_bits_a_data = nodeIn_w_bits_data; // @[Edges.scala:500:17] wire [7:0] wOut_bits_a_mask = nodeIn_w_bits_strb; // @[Edges.scala:500:17] wire nodeIn_b_irr_ready = nodeIn_b_ready; // @[Decoupled.scala:401:19] wire nodeIn_b_irr_valid; // @[Decoupled.scala:401:19] assign auto_in_b_valid_0 = nodeIn_b_valid; // @[MixedNode.scala:551:17] wire [3:0] nodeIn_b_irr_bits_id; // @[Decoupled.scala:401:19] assign auto_in_b_bits_id_0 = nodeIn_b_bits_id; // @[MixedNode.scala:551:17] wire [1:0] nodeIn_b_irr_bits_resp; // @[Decoupled.scala:401:19] assign auto_in_b_bits_resp_0 = nodeIn_b_bits_resp; // @[MixedNode.scala:551:17] wire _nodeIn_ar_ready_T; // @[UnsafeAXI4ToTL.scala:136:48] assign auto_in_ar_ready_0 = nodeIn_ar_ready; // @[MixedNode.scala:551:17] wire [31:0] _rOk_T_14 = nodeIn_ar_bits_addr; // @[Parameters.scala:137:31] wire nodeIn_r_irr_ready = nodeIn_r_ready; // @[Decoupled.scala:401:19] wire nodeIn_r_irr_valid; // @[Decoupled.scala:401:19] assign auto_in_r_valid_0 = nodeIn_r_valid; // @[MixedNode.scala:551:17] wire [3:0] nodeIn_r_irr_bits_id; // @[Decoupled.scala:401:19] assign auto_in_r_bits_id_0 = nodeIn_r_bits_id; // @[MixedNode.scala:551:17] wire [63:0] nodeIn_r_irr_bits_data; // @[Decoupled.scala:401:19] assign auto_in_r_bits_data_0 = nodeIn_r_bits_data; // @[MixedNode.scala:551:17] wire [1:0] nodeIn_r_irr_bits_resp; // @[Decoupled.scala:401:19] assign auto_in_r_bits_resp_0 = nodeIn_r_bits_resp; // @[MixedNode.scala:551:17] wire nodeIn_r_irr_bits_last; // @[Decoupled.scala:401:19] assign auto_in_r_bits_last_0 = nodeIn_r_bits_last; // @[MixedNode.scala:551:17] wire _nodeOut_a_valid_T_4; // @[Arbiter.scala:96:24] assign auto_out_a_valid_0 = nodeOut_a_valid; // @[MixedNode.scala:542:17] wire [2:0] _nodeOut_a_bits_WIRE_opcode; // @[Mux.scala:30:73] assign auto_out_a_bits_opcode_0 = nodeOut_a_bits_opcode; // @[MixedNode.scala:542:17] wire [3:0] _nodeOut_a_bits_WIRE_size; // @[Mux.scala:30:73] assign auto_out_a_bits_size_0 = nodeOut_a_bits_size; // @[MixedNode.scala:542:17] wire [5:0] _nodeOut_a_bits_WIRE_source; // @[Mux.scala:30:73] assign auto_out_a_bits_source_0 = nodeOut_a_bits_source; // @[MixedNode.scala:542:17] wire [31:0] _nodeOut_a_bits_WIRE_address; // @[Mux.scala:30:73] assign 
auto_out_a_bits_address_0 = nodeOut_a_bits_address; // @[MixedNode.scala:542:17] wire [7:0] _nodeOut_a_bits_WIRE_mask; // @[Mux.scala:30:73] assign auto_out_a_bits_mask_0 = nodeOut_a_bits_mask; // @[MixedNode.scala:542:17] wire [63:0] _nodeOut_a_bits_WIRE_data; // @[Mux.scala:30:73] assign auto_out_a_bits_data_0 = nodeOut_a_bits_data; // @[MixedNode.scala:542:17] wire _nodeOut_d_ready_T_2; // @[UnsafeAXI4ToTL.scala:231:50] assign auto_out_d_ready_0 = nodeOut_d_ready; // @[MixedNode.scala:542:17] wire _rOut_ready_T; // @[Arbiter.scala:94:31] wire _rOut_valid_T; // @[UnsafeAXI4ToTL.scala:137:49] wire [3:0] rOut_bits_a_size; // @[Edges.scala:460:17] wire [5:0] rOut_bits_a_source; // @[Edges.scala:460:17] wire [31:0] rOut_bits_a_address; // @[Edges.scala:460:17] wire [7:0] rOut_bits_a_mask; // @[Edges.scala:460:17] wire [3:0] rOut_bits_size; // @[UnsafeAXI4ToTL.scala:120:25] wire [5:0] rOut_bits_source; // @[UnsafeAXI4ToTL.scala:120:25] wire [31:0] rOut_bits_address; // @[UnsafeAXI4ToTL.scala:120:25] wire [7:0] rOut_bits_mask; // @[UnsafeAXI4ToTL.scala:120:25] wire rOut_ready; // @[UnsafeAXI4ToTL.scala:120:25] wire rOut_valid; // @[UnsafeAXI4ToTL.scala:120:25] wire [15:0] _rBytes1_T = {nodeIn_ar_bits_len, 8'hFF}; // @[Bundles.scala:33:9] wire [22:0] _rBytes1_T_1 = {7'h0, _rBytes1_T} << nodeIn_ar_bits_size; // @[Bundles.scala:33:{9,21}] wire [14:0] rBytes1 = _rBytes1_T_1[22:8]; // @[Bundles.scala:33:{21,30}] wire [15:0] _rSize_T = {rBytes1, 1'h0}; // @[package.scala:241:35] wire [15:0] _rSize_T_1 = {_rSize_T[15:1], 1'h1}; // @[package.scala:241:{35,40}] wire [15:0] _rSize_T_2 = {1'h0, rBytes1}; // @[package.scala:241:53] wire [15:0] _rSize_T_3 = ~_rSize_T_2; // @[package.scala:241:{49,53}] wire [15:0] _rSize_T_4 = _rSize_T_1 & _rSize_T_3; // @[package.scala:241:{40,47,49}] wire [7:0] rSize_hi = _rSize_T_4[15:8]; // @[OneHot.scala:30:18] wire [7:0] rSize_lo = _rSize_T_4[7:0]; // @[OneHot.scala:31:18] wire _rSize_T_5 = |rSize_hi; // @[OneHot.scala:30:18, :32:14] wire [7:0] _rSize_T_6 = rSize_hi | rSize_lo; // @[OneHot.scala:30:18, :31:18, :32:28] wire [3:0] rSize_hi_1 = _rSize_T_6[7:4]; // @[OneHot.scala:30:18, :32:28] wire [3:0] rSize_lo_1 = _rSize_T_6[3:0]; // @[OneHot.scala:31:18, :32:28] wire _rSize_T_7 = |rSize_hi_1; // @[OneHot.scala:30:18, :32:14] wire [3:0] _rSize_T_8 = rSize_hi_1 | rSize_lo_1; // @[OneHot.scala:30:18, :31:18, :32:28] wire [1:0] rSize_hi_2 = _rSize_T_8[3:2]; // @[OneHot.scala:30:18, :32:28] wire [1:0] rSize_lo_2 = _rSize_T_8[1:0]; // @[OneHot.scala:31:18, :32:28] wire _rSize_T_9 = |rSize_hi_2; // @[OneHot.scala:30:18, :32:14] wire [1:0] _rSize_T_10 = rSize_hi_2 | rSize_lo_2; // @[OneHot.scala:30:18, :31:18, :32:28] wire _rSize_T_11 = _rSize_T_10[1]; // @[OneHot.scala:32:28] wire [1:0] _rSize_T_12 = {_rSize_T_9, _rSize_T_11}; // @[OneHot.scala:32:{10,14}] wire [2:0] _rSize_T_13 = {_rSize_T_7, _rSize_T_12}; // @[OneHot.scala:32:{10,14}] wire [3:0] rSize = {_rSize_T_5, _rSize_T_13}; // @[OneHot.scala:32:{10,14}] assign rOut_bits_a_size = rSize; // @[OneHot.scala:32:10] wire [3:0] _rOut_bits_a_mask_sizeOH_T = rSize; // @[OneHot.scala:32:10] wire _GEN = rSize < 4'hD; // @[OneHot.scala:32:10] wire _rOk_T_1; // @[Parameters.scala:92:38] assign _rOk_T_1 = _GEN; // @[Parameters.scala:92:38] wire _rOut_bits_legal_T_1; // @[Parameters.scala:92:38] assign _rOut_bits_legal_T_1 = _GEN; // @[Parameters.scala:92:38] wire _rOk_T_2 = _rOk_T_1; // @[Parameters.scala:92:{33,38}] wire _rOk_T_3 = _rOk_T_2; // @[Parameters.scala:684:29] wire [31:0] _rOk_T_4 = {nodeIn_ar_bits_addr[31:14], 
nodeIn_ar_bits_addr[13:0] ^ 14'h3000}; // @[Parameters.scala:137:31] wire [32:0] _rOk_T_5 = {1'h0, _rOk_T_4}; // @[Parameters.scala:137:{31,41}] wire [32:0] _rOk_T_6 = _rOk_T_5 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}] wire [32:0] _rOk_T_7 = _rOk_T_6; // @[Parameters.scala:137:46] wire _rOk_T_8 = _rOk_T_7 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _rOk_T_9 = _rOk_T_3 & _rOk_T_8; // @[Parameters.scala:684:{29,54}] wire _rOk_T_68 = _rOk_T_9; // @[Parameters.scala:684:54, :686:26] wire _GEN_0 = rSize < 4'h7; // @[OneHot.scala:32:10] wire _rOk_T_11; // @[Parameters.scala:92:38] assign _rOk_T_11 = _GEN_0; // @[Parameters.scala:92:38] wire _rOut_bits_legal_T_11; // @[Parameters.scala:92:38] assign _rOut_bits_legal_T_11 = _GEN_0; // @[Parameters.scala:92:38] wire _rOk_T_12 = _rOk_T_11; // @[Parameters.scala:92:{33,38}] wire _rOk_T_13 = _rOk_T_12; // @[Parameters.scala:684:29] wire [32:0] _rOk_T_15 = {1'h0, _rOk_T_14}; // @[Parameters.scala:137:{31,41}] wire [32:0] _rOk_T_16 = _rOk_T_15 & 33'h1FFFFE000; // @[Parameters.scala:137:{41,46}] wire [32:0] _rOk_T_17 = _rOk_T_16; // @[Parameters.scala:137:46] wire _rOk_T_18 = _rOk_T_17 == 33'h0; // @[Parameters.scala:137:{46,59}] wire [31:0] _rOk_T_19 = {nodeIn_ar_bits_addr[31:17], nodeIn_ar_bits_addr[16:0] ^ 17'h10000}; // @[Parameters.scala:137:31] wire [32:0] _rOk_T_20 = {1'h0, _rOk_T_19}; // @[Parameters.scala:137:{31,41}] wire [32:0] _rOk_T_21 = _rOk_T_20 & 33'h1FFFF0000; // @[Parameters.scala:137:{41,46}] wire [32:0] _rOk_T_22 = _rOk_T_21; // @[Parameters.scala:137:46] wire _rOk_T_23 = _rOk_T_22 == 33'h0; // @[Parameters.scala:137:{46,59}] wire [31:0] _rOk_T_24 = {nodeIn_ar_bits_addr[31:21], nodeIn_ar_bits_addr[20:0] ^ 21'h100000}; // @[Parameters.scala:137:31] wire [32:0] _rOk_T_25 = {1'h0, _rOk_T_24}; // @[Parameters.scala:137:{31,41}] wire [32:0] _rOk_T_26 = _rOk_T_25 & 33'h1FFFEF000; // @[Parameters.scala:137:{41,46}] wire [32:0] _rOk_T_27 = _rOk_T_26; // @[Parameters.scala:137:46] wire _rOk_T_28 = _rOk_T_27 == 33'h0; // @[Parameters.scala:137:{46,59}] wire [31:0] _rOk_T_29 = {nodeIn_ar_bits_addr[31:26], nodeIn_ar_bits_addr[25:0] ^ 26'h2000000}; // @[Parameters.scala:137:31] wire [32:0] _rOk_T_30 = {1'h0, _rOk_T_29}; // @[Parameters.scala:137:{31,41}] wire [32:0] _rOk_T_31 = _rOk_T_30 & 33'h1FFFF0000; // @[Parameters.scala:137:{41,46}] wire [32:0] _rOk_T_32 = _rOk_T_31; // @[Parameters.scala:137:46] wire _rOk_T_33 = _rOk_T_32 == 33'h0; // @[Parameters.scala:137:{46,59}] wire [31:0] _rOk_T_34 = {nodeIn_ar_bits_addr[31:26], nodeIn_ar_bits_addr[25:0] ^ 26'h2010000}; // @[Parameters.scala:137:31] wire [32:0] _rOk_T_35 = {1'h0, _rOk_T_34}; // @[Parameters.scala:137:{31,41}] wire [32:0] _rOk_T_36 = _rOk_T_35 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}] wire [32:0] _rOk_T_37 = _rOk_T_36; // @[Parameters.scala:137:46] wire _rOk_T_38 = _rOk_T_37 == 33'h0; // @[Parameters.scala:137:{46,59}] wire [31:0] _rOk_T_39 = {nodeIn_ar_bits_addr[31:28], nodeIn_ar_bits_addr[27:0] ^ 28'h8000000}; // @[Parameters.scala:137:31] wire [32:0] _rOk_T_40 = {1'h0, _rOk_T_39}; // @[Parameters.scala:137:{31,41}] wire [32:0] _rOk_T_41 = _rOk_T_40 & 33'h1FFFF0000; // @[Parameters.scala:137:{41,46}] wire [32:0] _rOk_T_42 = _rOk_T_41; // @[Parameters.scala:137:46] wire _rOk_T_43 = _rOk_T_42 == 33'h0; // @[Parameters.scala:137:{46,59}] wire [31:0] _rOk_T_44 = {nodeIn_ar_bits_addr[31:28], nodeIn_ar_bits_addr[27:0] ^ 28'hC000000}; // @[Parameters.scala:137:31] wire [32:0] _rOk_T_45 = {1'h0, _rOk_T_44}; // @[Parameters.scala:137:{31,41}] wire [32:0] _rOk_T_46 = 
_rOk_T_45 & 33'h1FC000000; // @[Parameters.scala:137:{41,46}] wire [32:0] _rOk_T_47 = _rOk_T_46; // @[Parameters.scala:137:46] wire _rOk_T_48 = _rOk_T_47 == 33'h0; // @[Parameters.scala:137:{46,59}] wire [31:0] _rOk_T_49 = {nodeIn_ar_bits_addr[31:29], nodeIn_ar_bits_addr[28:0] ^ 29'h10020000}; // @[Parameters.scala:137:31] wire [32:0] _rOk_T_50 = {1'h0, _rOk_T_49}; // @[Parameters.scala:137:{31,41}] wire [32:0] _rOk_T_51 = _rOk_T_50 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}] wire [32:0] _rOk_T_52 = _rOk_T_51; // @[Parameters.scala:137:46] wire _rOk_T_53 = _rOk_T_52 == 33'h0; // @[Parameters.scala:137:{46,59}] wire [31:0] _rOk_T_54 = nodeIn_ar_bits_addr ^ 32'h80000000; // @[Parameters.scala:137:31] wire [32:0] _rOk_T_55 = {1'h0, _rOk_T_54}; // @[Parameters.scala:137:{31,41}] wire [32:0] _rOk_T_56 = _rOk_T_55 & 33'h1F0000000; // @[Parameters.scala:137:{41,46}] wire [32:0] _rOk_T_57 = _rOk_T_56; // @[Parameters.scala:137:46] wire _rOk_T_58 = _rOk_T_57 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _rOk_T_59 = _rOk_T_18 | _rOk_T_23; // @[Parameters.scala:685:42] wire _rOk_T_60 = _rOk_T_59 | _rOk_T_28; // @[Parameters.scala:685:42] wire _rOk_T_61 = _rOk_T_60 | _rOk_T_33; // @[Parameters.scala:685:42] wire _rOk_T_62 = _rOk_T_61 | _rOk_T_38; // @[Parameters.scala:685:42] wire _rOk_T_63 = _rOk_T_62 | _rOk_T_43; // @[Parameters.scala:685:42] wire _rOk_T_64 = _rOk_T_63 | _rOk_T_48; // @[Parameters.scala:685:42] wire _rOk_T_65 = _rOk_T_64 | _rOk_T_53; // @[Parameters.scala:685:42] wire _rOk_T_66 = _rOk_T_65 | _rOk_T_58; // @[Parameters.scala:685:42] wire _rOk_T_67 = _rOk_T_13 & _rOk_T_66; // @[Parameters.scala:684:{29,54}, :685:42] wire rOk = _rOk_T_68 | _rOk_T_67; // @[Parameters.scala:684:54, :686:26] wire [5:0] rId = {1'h0, _listBuffer_ioReservedIndex}; // @[UnsafeAXI4ToTL.scala:108:15, :125:12] assign rOut_bits_a_source = rId; // @[Edges.scala:460:17] wire [2:0] _rAddr_T = nodeIn_ar_bits_addr[2:0]; // @[MixedNode.scala:551:17] wire [13:0] _rAddr_T_1 = {11'h600, _rAddr_T}; // @[UnsafeAXI4ToTL.scala:129:{60,77}] wire [31:0] rAddr = rOk ? 
nodeIn_ar_bits_addr : {18'h0, _rAddr_T_1}; // @[Parameters.scala:686:26] wire [31:0] _rOut_bits_legal_T_14 = rAddr; // @[Parameters.scala:137:31] assign rOut_bits_a_address = rAddr; // @[Edges.scala:460:17] wire _listBuffer_ioReserve_valid_T = nodeIn_ar_valid & rOut_ready; // @[MixedNode.scala:551:17] assign _nodeIn_ar_ready_T = rOut_ready & _listBuffer_ioReserve_ready; // @[UnsafeAXI4ToTL.scala:108:15, :120:25, :136:48] assign nodeIn_ar_ready = _nodeIn_ar_ready_T; // @[MixedNode.scala:551:17] assign _rOut_valid_T = nodeIn_ar_valid & _listBuffer_ioReserve_ready; // @[MixedNode.scala:551:17] assign rOut_valid = _rOut_valid_T; // @[UnsafeAXI4ToTL.scala:120:25, :137:49] wire _rOut_bits_legal_T_2 = _rOut_bits_legal_T_1; // @[Parameters.scala:92:{33,38}] wire _rOut_bits_legal_T_3 = _rOut_bits_legal_T_2; // @[Parameters.scala:684:29] wire [31:0] _rOut_bits_legal_T_4 = {rAddr[31:14], rAddr[13:0] ^ 14'h3000}; // @[Parameters.scala:137:31] wire [32:0] _rOut_bits_legal_T_5 = {1'h0, _rOut_bits_legal_T_4}; // @[Parameters.scala:137:{31,41}] wire [32:0] _rOut_bits_legal_T_6 = _rOut_bits_legal_T_5 & 33'h9A013000; // @[Parameters.scala:137:{41,46}] wire [32:0] _rOut_bits_legal_T_7 = _rOut_bits_legal_T_6; // @[Parameters.scala:137:46] wire _rOut_bits_legal_T_8 = _rOut_bits_legal_T_7 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _rOut_bits_legal_T_9 = _rOut_bits_legal_T_3 & _rOut_bits_legal_T_8; // @[Parameters.scala:684:{29,54}] wire _rOut_bits_legal_T_62 = _rOut_bits_legal_T_9; // @[Parameters.scala:684:54, :686:26] wire _rOut_bits_legal_T_12 = _rOut_bits_legal_T_11; // @[Parameters.scala:92:{33,38}] wire _rOut_bits_legal_T_13 = _rOut_bits_legal_T_12; // @[Parameters.scala:684:29] wire [32:0] _rOut_bits_legal_T_15 = {1'h0, _rOut_bits_legal_T_14}; // @[Parameters.scala:137:{31,41}] wire [32:0] _rOut_bits_legal_T_16 = _rOut_bits_legal_T_15 & 33'h9A012000; // @[Parameters.scala:137:{41,46}] wire [32:0] _rOut_bits_legal_T_17 = _rOut_bits_legal_T_16; // @[Parameters.scala:137:46] wire _rOut_bits_legal_T_18 = _rOut_bits_legal_T_17 == 33'h0; // @[Parameters.scala:137:{46,59}] wire [31:0] _GEN_1 = {rAddr[31:17], rAddr[16:0] ^ 17'h10000}; // @[Parameters.scala:137:31] wire [31:0] _rOut_bits_legal_T_19; // @[Parameters.scala:137:31] assign _rOut_bits_legal_T_19 = _GEN_1; // @[Parameters.scala:137:31] wire [31:0] _rOut_bits_legal_T_24; // @[Parameters.scala:137:31] assign _rOut_bits_legal_T_24 = _GEN_1; // @[Parameters.scala:137:31] wire [32:0] _rOut_bits_legal_T_20 = {1'h0, _rOut_bits_legal_T_19}; // @[Parameters.scala:137:{31,41}] wire [32:0] _rOut_bits_legal_T_21 = _rOut_bits_legal_T_20 & 33'h98013000; // @[Parameters.scala:137:{41,46}] wire [32:0] _rOut_bits_legal_T_22 = _rOut_bits_legal_T_21; // @[Parameters.scala:137:46] wire _rOut_bits_legal_T_23 = _rOut_bits_legal_T_22 == 33'h0; // @[Parameters.scala:137:{46,59}] wire [32:0] _rOut_bits_legal_T_25 = {1'h0, _rOut_bits_legal_T_24}; // @[Parameters.scala:137:{31,41}] wire [32:0] _rOut_bits_legal_T_26 = _rOut_bits_legal_T_25 & 33'h9A010000; // @[Parameters.scala:137:{41,46}] wire [32:0] _rOut_bits_legal_T_27 = _rOut_bits_legal_T_26; // @[Parameters.scala:137:46] wire _rOut_bits_legal_T_28 = _rOut_bits_legal_T_27 == 33'h0; // @[Parameters.scala:137:{46,59}] wire [31:0] _rOut_bits_legal_T_29 = {rAddr[31:26], rAddr[25:0] ^ 26'h2000000}; // @[Parameters.scala:137:31] wire [32:0] _rOut_bits_legal_T_30 = {1'h0, _rOut_bits_legal_T_29}; // @[Parameters.scala:137:{31,41}] wire [32:0] _rOut_bits_legal_T_31 = _rOut_bits_legal_T_30 & 33'h9A010000; // 
@[Parameters.scala:137:{41,46}] wire [32:0] _rOut_bits_legal_T_32 = _rOut_bits_legal_T_31; // @[Parameters.scala:137:46] wire _rOut_bits_legal_T_33 = _rOut_bits_legal_T_32 == 33'h0; // @[Parameters.scala:137:{46,59}] wire [31:0] _GEN_2 = {rAddr[31:28], rAddr[27:0] ^ 28'h8000000}; // @[Parameters.scala:137:31] wire [31:0] _rOut_bits_legal_T_34; // @[Parameters.scala:137:31] assign _rOut_bits_legal_T_34 = _GEN_2; // @[Parameters.scala:137:31] wire [31:0] _rOut_bits_legal_T_39; // @[Parameters.scala:137:31] assign _rOut_bits_legal_T_39 = _GEN_2; // @[Parameters.scala:137:31] wire [32:0] _rOut_bits_legal_T_35 = {1'h0, _rOut_bits_legal_T_34}; // @[Parameters.scala:137:{31,41}] wire [32:0] _rOut_bits_legal_T_36 = _rOut_bits_legal_T_35 & 33'h98000000; // @[Parameters.scala:137:{41,46}] wire [32:0] _rOut_bits_legal_T_37 = _rOut_bits_legal_T_36; // @[Parameters.scala:137:46] wire _rOut_bits_legal_T_38 = _rOut_bits_legal_T_37 == 33'h0; // @[Parameters.scala:137:{46,59}] wire [32:0] _rOut_bits_legal_T_40 = {1'h0, _rOut_bits_legal_T_39}; // @[Parameters.scala:137:{31,41}] wire [32:0] _rOut_bits_legal_T_41 = _rOut_bits_legal_T_40 & 33'h9A010000; // @[Parameters.scala:137:{41,46}] wire [32:0] _rOut_bits_legal_T_42 = _rOut_bits_legal_T_41; // @[Parameters.scala:137:46] wire _rOut_bits_legal_T_43 = _rOut_bits_legal_T_42 == 33'h0; // @[Parameters.scala:137:{46,59}] wire [31:0] _rOut_bits_legal_T_44 = {rAddr[31:29], rAddr[28:0] ^ 29'h10000000}; // @[Parameters.scala:137:31] wire [32:0] _rOut_bits_legal_T_45 = {1'h0, _rOut_bits_legal_T_44}; // @[Parameters.scala:137:{31,41}] wire [32:0] _rOut_bits_legal_T_46 = _rOut_bits_legal_T_45 & 33'h9A013000; // @[Parameters.scala:137:{41,46}] wire [32:0] _rOut_bits_legal_T_47 = _rOut_bits_legal_T_46; // @[Parameters.scala:137:46] wire _rOut_bits_legal_T_48 = _rOut_bits_legal_T_47 == 33'h0; // @[Parameters.scala:137:{46,59}] wire [31:0] _rOut_bits_legal_T_49 = rAddr ^ 32'h80000000; // @[Parameters.scala:137:31] wire [32:0] _rOut_bits_legal_T_50 = {1'h0, _rOut_bits_legal_T_49}; // @[Parameters.scala:137:{31,41}] wire [32:0] _rOut_bits_legal_T_51 = _rOut_bits_legal_T_50 & 33'h90000000; // @[Parameters.scala:137:{41,46}] wire [32:0] _rOut_bits_legal_T_52 = _rOut_bits_legal_T_51; // @[Parameters.scala:137:46] wire _rOut_bits_legal_T_53 = _rOut_bits_legal_T_52 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _rOut_bits_legal_T_54 = _rOut_bits_legal_T_18 | _rOut_bits_legal_T_23; // @[Parameters.scala:685:42] wire _rOut_bits_legal_T_55 = _rOut_bits_legal_T_54 | _rOut_bits_legal_T_28; // @[Parameters.scala:685:42] wire _rOut_bits_legal_T_56 = _rOut_bits_legal_T_55 | _rOut_bits_legal_T_33; // @[Parameters.scala:685:42] wire _rOut_bits_legal_T_57 = _rOut_bits_legal_T_56 | _rOut_bits_legal_T_38; // @[Parameters.scala:685:42] wire _rOut_bits_legal_T_58 = _rOut_bits_legal_T_57 | _rOut_bits_legal_T_43; // @[Parameters.scala:685:42] wire _rOut_bits_legal_T_59 = _rOut_bits_legal_T_58 | _rOut_bits_legal_T_48; // @[Parameters.scala:685:42] wire _rOut_bits_legal_T_60 = _rOut_bits_legal_T_59 | _rOut_bits_legal_T_53; // @[Parameters.scala:685:42] wire _rOut_bits_legal_T_61 = _rOut_bits_legal_T_13 & _rOut_bits_legal_T_60; // @[Parameters.scala:684:{29,54}, :685:42] wire rOut_bits_legal = _rOut_bits_legal_T_62 | _rOut_bits_legal_T_61; // @[Parameters.scala:684:54, :686:26] assign rOut_bits_size = rOut_bits_a_size; // @[Edges.scala:460:17] assign rOut_bits_source = rOut_bits_a_source; // @[Edges.scala:460:17] assign rOut_bits_address = rOut_bits_a_address; // @[Edges.scala:460:17] wire [7:0] 
_rOut_bits_a_mask_T; // @[Misc.scala:222:10] assign rOut_bits_mask = rOut_bits_a_mask; // @[Edges.scala:460:17] wire [1:0] rOut_bits_a_mask_sizeOH_shiftAmount = _rOut_bits_a_mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49] wire [3:0] _rOut_bits_a_mask_sizeOH_T_1 = 4'h1 << rOut_bits_a_mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [2:0] _rOut_bits_a_mask_sizeOH_T_2 = _rOut_bits_a_mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}] wire [2:0] rOut_bits_a_mask_sizeOH = {_rOut_bits_a_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27] wire rOut_bits_a_mask_sub_sub_sub_0_1 = rSize > 4'h2; // @[OneHot.scala:32:10] wire rOut_bits_a_mask_sub_sub_size = rOut_bits_a_mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26] wire rOut_bits_a_mask_sub_sub_bit = rAddr[2]; // @[Misc.scala:210:26] wire rOut_bits_a_mask_sub_sub_1_2 = rOut_bits_a_mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire rOut_bits_a_mask_sub_sub_nbit = ~rOut_bits_a_mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire rOut_bits_a_mask_sub_sub_0_2 = rOut_bits_a_mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _rOut_bits_a_mask_sub_sub_acc_T = rOut_bits_a_mask_sub_sub_size & rOut_bits_a_mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire rOut_bits_a_mask_sub_sub_0_1 = rOut_bits_a_mask_sub_sub_sub_0_1 | _rOut_bits_a_mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}] wire _rOut_bits_a_mask_sub_sub_acc_T_1 = rOut_bits_a_mask_sub_sub_size & rOut_bits_a_mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire rOut_bits_a_mask_sub_sub_1_1 = rOut_bits_a_mask_sub_sub_sub_0_1 | _rOut_bits_a_mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}] wire rOut_bits_a_mask_sub_size = rOut_bits_a_mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26] wire rOut_bits_a_mask_sub_bit = rAddr[1]; // @[Misc.scala:210:26] wire rOut_bits_a_mask_sub_nbit = ~rOut_bits_a_mask_sub_bit; // @[Misc.scala:210:26, :211:20] wire rOut_bits_a_mask_sub_0_2 = rOut_bits_a_mask_sub_sub_0_2 & rOut_bits_a_mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _rOut_bits_a_mask_sub_acc_T = rOut_bits_a_mask_sub_size & rOut_bits_a_mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire rOut_bits_a_mask_sub_0_1 = rOut_bits_a_mask_sub_sub_0_1 | _rOut_bits_a_mask_sub_acc_T; // @[Misc.scala:215:{29,38}] wire rOut_bits_a_mask_sub_1_2 = rOut_bits_a_mask_sub_sub_0_2 & rOut_bits_a_mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _rOut_bits_a_mask_sub_acc_T_1 = rOut_bits_a_mask_sub_size & rOut_bits_a_mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire rOut_bits_a_mask_sub_1_1 = rOut_bits_a_mask_sub_sub_0_1 | _rOut_bits_a_mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire rOut_bits_a_mask_sub_2_2 = rOut_bits_a_mask_sub_sub_1_2 & rOut_bits_a_mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _rOut_bits_a_mask_sub_acc_T_2 = rOut_bits_a_mask_sub_size & rOut_bits_a_mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire rOut_bits_a_mask_sub_2_1 = rOut_bits_a_mask_sub_sub_1_1 | _rOut_bits_a_mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire rOut_bits_a_mask_sub_3_2 = rOut_bits_a_mask_sub_sub_1_2 & rOut_bits_a_mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _rOut_bits_a_mask_sub_acc_T_3 = rOut_bits_a_mask_sub_size & rOut_bits_a_mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire rOut_bits_a_mask_sub_3_1 = rOut_bits_a_mask_sub_sub_1_1 | _rOut_bits_a_mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire rOut_bits_a_mask_size = rOut_bits_a_mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26] wire rOut_bits_a_mask_bit = 
rAddr[0]; // @[Misc.scala:210:26] wire rOut_bits_a_mask_nbit = ~rOut_bits_a_mask_bit; // @[Misc.scala:210:26, :211:20] wire rOut_bits_a_mask_eq = rOut_bits_a_mask_sub_0_2 & rOut_bits_a_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _rOut_bits_a_mask_acc_T = rOut_bits_a_mask_size & rOut_bits_a_mask_eq; // @[Misc.scala:209:26, :214:27, :215:38] wire rOut_bits_a_mask_acc = rOut_bits_a_mask_sub_0_1 | _rOut_bits_a_mask_acc_T; // @[Misc.scala:215:{29,38}] wire rOut_bits_a_mask_eq_1 = rOut_bits_a_mask_sub_0_2 & rOut_bits_a_mask_bit; // @[Misc.scala:210:26, :214:27] wire _rOut_bits_a_mask_acc_T_1 = rOut_bits_a_mask_size & rOut_bits_a_mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38] wire rOut_bits_a_mask_acc_1 = rOut_bits_a_mask_sub_0_1 | _rOut_bits_a_mask_acc_T_1; // @[Misc.scala:215:{29,38}] wire rOut_bits_a_mask_eq_2 = rOut_bits_a_mask_sub_1_2 & rOut_bits_a_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _rOut_bits_a_mask_acc_T_2 = rOut_bits_a_mask_size & rOut_bits_a_mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38] wire rOut_bits_a_mask_acc_2 = rOut_bits_a_mask_sub_1_1 | _rOut_bits_a_mask_acc_T_2; // @[Misc.scala:215:{29,38}] wire rOut_bits_a_mask_eq_3 = rOut_bits_a_mask_sub_1_2 & rOut_bits_a_mask_bit; // @[Misc.scala:210:26, :214:27] wire _rOut_bits_a_mask_acc_T_3 = rOut_bits_a_mask_size & rOut_bits_a_mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38] wire rOut_bits_a_mask_acc_3 = rOut_bits_a_mask_sub_1_1 | _rOut_bits_a_mask_acc_T_3; // @[Misc.scala:215:{29,38}] wire rOut_bits_a_mask_eq_4 = rOut_bits_a_mask_sub_2_2 & rOut_bits_a_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _rOut_bits_a_mask_acc_T_4 = rOut_bits_a_mask_size & rOut_bits_a_mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38] wire rOut_bits_a_mask_acc_4 = rOut_bits_a_mask_sub_2_1 | _rOut_bits_a_mask_acc_T_4; // @[Misc.scala:215:{29,38}] wire rOut_bits_a_mask_eq_5 = rOut_bits_a_mask_sub_2_2 & rOut_bits_a_mask_bit; // @[Misc.scala:210:26, :214:27] wire _rOut_bits_a_mask_acc_T_5 = rOut_bits_a_mask_size & rOut_bits_a_mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38] wire rOut_bits_a_mask_acc_5 = rOut_bits_a_mask_sub_2_1 | _rOut_bits_a_mask_acc_T_5; // @[Misc.scala:215:{29,38}] wire rOut_bits_a_mask_eq_6 = rOut_bits_a_mask_sub_3_2 & rOut_bits_a_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _rOut_bits_a_mask_acc_T_6 = rOut_bits_a_mask_size & rOut_bits_a_mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38] wire rOut_bits_a_mask_acc_6 = rOut_bits_a_mask_sub_3_1 | _rOut_bits_a_mask_acc_T_6; // @[Misc.scala:215:{29,38}] wire rOut_bits_a_mask_eq_7 = rOut_bits_a_mask_sub_3_2 & rOut_bits_a_mask_bit; // @[Misc.scala:210:26, :214:27] wire _rOut_bits_a_mask_acc_T_7 = rOut_bits_a_mask_size & rOut_bits_a_mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38] wire rOut_bits_a_mask_acc_7 = rOut_bits_a_mask_sub_3_1 | _rOut_bits_a_mask_acc_T_7; // @[Misc.scala:215:{29,38}] wire [1:0] rOut_bits_a_mask_lo_lo = {rOut_bits_a_mask_acc_1, rOut_bits_a_mask_acc}; // @[Misc.scala:215:29, :222:10] wire [1:0] rOut_bits_a_mask_lo_hi = {rOut_bits_a_mask_acc_3, rOut_bits_a_mask_acc_2}; // @[Misc.scala:215:29, :222:10] wire [3:0] rOut_bits_a_mask_lo = {rOut_bits_a_mask_lo_hi, rOut_bits_a_mask_lo_lo}; // @[Misc.scala:222:10] wire [1:0] rOut_bits_a_mask_hi_lo = {rOut_bits_a_mask_acc_5, rOut_bits_a_mask_acc_4}; // @[Misc.scala:215:29, :222:10] wire [1:0] rOut_bits_a_mask_hi_hi = {rOut_bits_a_mask_acc_7, rOut_bits_a_mask_acc_6}; // @[Misc.scala:215:29, :222:10] wire [3:0] rOut_bits_a_mask_hi = {rOut_bits_a_mask_hi_hi, rOut_bits_a_mask_hi_lo}; // 
@[Misc.scala:222:10] assign _rOut_bits_a_mask_T = {rOut_bits_a_mask_hi, rOut_bits_a_mask_lo}; // @[Misc.scala:222:10] assign rOut_bits_a_mask = _rOut_bits_a_mask_T; // @[Misc.scala:222:10] wire [4:0] strippedResponseSourceId = nodeOut_d_bits_source[4:0]; // @[MixedNode.scala:542:17] wire [4:0] usedWriteIdsClr_shiftAmount = strippedResponseSourceId; // @[OneHot.scala:64:49] reg writeBurstBusy; // @[UnsafeAXI4ToTL.scala:162:35] wire _writeBurstBusy_T = ~nodeIn_w_bits_last; // @[MixedNode.scala:551:17] reg [31:0] usedWriteIds; // @[UnsafeAXI4ToTL.scala:167:33] wire _canIssueW_T = &usedWriteIds; // @[UnsafeAXI4ToTL.scala:167:33, :168:40] wire canIssueW = ~_canIssueW_T; // @[UnsafeAXI4ToTL.scala:168:{26,40}] wire [31:0] usedWriteIdsSet; // @[UnsafeAXI4ToTL.scala:170:40] wire [31:0] usedWriteIdsClr; // @[UnsafeAXI4ToTL.scala:171:40] wire [31:0] _usedWriteIds_T = ~usedWriteIdsClr; // @[UnsafeAXI4ToTL.scala:171:40, :173:39] wire [31:0] _usedWriteIds_T_1 = usedWriteIds & _usedWriteIds_T; // @[UnsafeAXI4ToTL.scala:167:33, :173:{37,39}] wire [31:0] _usedWriteIds_T_2 = _usedWriteIds_T_1 | usedWriteIdsSet; // @[UnsafeAXI4ToTL.scala:170:40, :173:{37,57}] wire [31:0] freeWriteIdOHRaw; // @[UnsafeAXI4ToTL.scala:177:34] wire _freeWriteIdOH_T = ~writeBurstBusy; // @[UnsafeAXI4ToTL.scala:162:35, :178:58] reg [31:0] freeWriteIdOH_r; // @[package.scala:88:63] wire [31:0] freeWriteIdOH = _freeWriteIdOH_T ? freeWriteIdOHRaw : freeWriteIdOH_r; // @[package.scala:88:{42,63}] wire [15:0] freeWriteIdIndex_hi = freeWriteIdOH[31:16]; // @[OneHot.scala:30:18] wire [15:0] freeWriteIdIndex_lo = freeWriteIdOH[15:0]; // @[OneHot.scala:31:18] wire _freeWriteIdIndex_T = |freeWriteIdIndex_hi; // @[OneHot.scala:30:18, :32:14] wire [15:0] _freeWriteIdIndex_T_1 = freeWriteIdIndex_hi | freeWriteIdIndex_lo; // @[OneHot.scala:30:18, :31:18, :32:28] wire [7:0] freeWriteIdIndex_hi_1 = _freeWriteIdIndex_T_1[15:8]; // @[OneHot.scala:30:18, :32:28] wire [7:0] freeWriteIdIndex_lo_1 = _freeWriteIdIndex_T_1[7:0]; // @[OneHot.scala:31:18, :32:28] wire _freeWriteIdIndex_T_2 = |freeWriteIdIndex_hi_1; // @[OneHot.scala:30:18, :32:14] wire [7:0] _freeWriteIdIndex_T_3 = freeWriteIdIndex_hi_1 | freeWriteIdIndex_lo_1; // @[OneHot.scala:30:18, :31:18, :32:28] wire [3:0] freeWriteIdIndex_hi_2 = _freeWriteIdIndex_T_3[7:4]; // @[OneHot.scala:30:18, :32:28] wire [3:0] freeWriteIdIndex_lo_2 = _freeWriteIdIndex_T_3[3:0]; // @[OneHot.scala:31:18, :32:28] wire _freeWriteIdIndex_T_4 = |freeWriteIdIndex_hi_2; // @[OneHot.scala:30:18, :32:14] wire [3:0] _freeWriteIdIndex_T_5 = freeWriteIdIndex_hi_2 | freeWriteIdIndex_lo_2; // @[OneHot.scala:30:18, :31:18, :32:28] wire [1:0] freeWriteIdIndex_hi_3 = _freeWriteIdIndex_T_5[3:2]; // @[OneHot.scala:30:18, :32:28] wire [1:0] freeWriteIdIndex_lo_3 = _freeWriteIdIndex_T_5[1:0]; // @[OneHot.scala:31:18, :32:28] wire _freeWriteIdIndex_T_6 = |freeWriteIdIndex_hi_3; // @[OneHot.scala:30:18, :32:14] wire [1:0] _freeWriteIdIndex_T_7 = freeWriteIdIndex_hi_3 | freeWriteIdIndex_lo_3; // @[OneHot.scala:30:18, :31:18, :32:28] wire _freeWriteIdIndex_T_8 = _freeWriteIdIndex_T_7[1]; // @[OneHot.scala:32:28] wire [1:0] _freeWriteIdIndex_T_9 = {_freeWriteIdIndex_T_6, _freeWriteIdIndex_T_8}; // @[OneHot.scala:32:{10,14}] wire [2:0] _freeWriteIdIndex_T_10 = {_freeWriteIdIndex_T_4, _freeWriteIdIndex_T_9}; // @[OneHot.scala:32:{10,14}] wire [3:0] _freeWriteIdIndex_T_11 = {_freeWriteIdIndex_T_2, _freeWriteIdIndex_T_10}; // @[OneHot.scala:32:{10,14}] wire [4:0] freeWriteIdIndex = {_freeWriteIdIndex_T, _freeWriteIdIndex_T_11}; // 
@[OneHot.scala:32:{10,14}] wire [31:0] _freeWriteIdOHRaw_T = ~usedWriteIds; // @[UnsafeAXI4ToTL.scala:167:33, :180:36] wire [32:0] _freeWriteIdOHRaw_T_1 = {_freeWriteIdOHRaw_T, 1'h0}; // @[package.scala:253:48] wire [31:0] _freeWriteIdOHRaw_T_2 = _freeWriteIdOHRaw_T_1[31:0]; // @[package.scala:253:{48,53}] wire [31:0] _freeWriteIdOHRaw_T_3 = _freeWriteIdOHRaw_T | _freeWriteIdOHRaw_T_2; // @[package.scala:253:{43,53}] wire [33:0] _freeWriteIdOHRaw_T_4 = {_freeWriteIdOHRaw_T_3, 2'h0}; // @[package.scala:253:{43,48}] wire [31:0] _freeWriteIdOHRaw_T_5 = _freeWriteIdOHRaw_T_4[31:0]; // @[package.scala:253:{48,53}] wire [31:0] _freeWriteIdOHRaw_T_6 = _freeWriteIdOHRaw_T_3 | _freeWriteIdOHRaw_T_5; // @[package.scala:253:{43,53}] wire [35:0] _freeWriteIdOHRaw_T_7 = {_freeWriteIdOHRaw_T_6, 4'h0}; // @[package.scala:253:{43,48}] wire [31:0] _freeWriteIdOHRaw_T_8 = _freeWriteIdOHRaw_T_7[31:0]; // @[package.scala:253:{48,53}] wire [31:0] _freeWriteIdOHRaw_T_9 = _freeWriteIdOHRaw_T_6 | _freeWriteIdOHRaw_T_8; // @[package.scala:253:{43,53}] wire [39:0] _freeWriteIdOHRaw_T_10 = {_freeWriteIdOHRaw_T_9, 8'h0}; // @[package.scala:253:{43,48}] wire [31:0] _freeWriteIdOHRaw_T_11 = _freeWriteIdOHRaw_T_10[31:0]; // @[package.scala:253:{48,53}] wire [31:0] _freeWriteIdOHRaw_T_12 = _freeWriteIdOHRaw_T_9 | _freeWriteIdOHRaw_T_11; // @[package.scala:253:{43,53}] wire [47:0] _freeWriteIdOHRaw_T_13 = {_freeWriteIdOHRaw_T_12, 16'h0}; // @[package.scala:253:{43,48}] wire [31:0] _freeWriteIdOHRaw_T_14 = _freeWriteIdOHRaw_T_13[31:0]; // @[package.scala:253:{48,53}] wire [31:0] _freeWriteIdOHRaw_T_15 = _freeWriteIdOHRaw_T_12 | _freeWriteIdOHRaw_T_14; // @[package.scala:253:{43,53}] wire [31:0] _freeWriteIdOHRaw_T_16 = _freeWriteIdOHRaw_T_15; // @[package.scala:253:43, :254:17] wire [32:0] _freeWriteIdOHRaw_T_17 = {_freeWriteIdOHRaw_T_16, 1'h0}; // @[package.scala:254:17] wire [32:0] _freeWriteIdOHRaw_T_18 = ~_freeWriteIdOHRaw_T_17; // @[UnsafeAXI4ToTL.scala:180:{27,51}] wire [31:0] _freeWriteIdOHRaw_T_19 = ~usedWriteIds; // @[UnsafeAXI4ToTL.scala:167:33, :180:{36,59}] wire [32:0] _freeWriteIdOHRaw_T_20 = {1'h0, _freeWriteIdOHRaw_T_18[31:0] & _freeWriteIdOHRaw_T_19}; // @[UnsafeAXI4ToTL.scala:180:{27,57,59}] assign freeWriteIdOHRaw = _freeWriteIdOHRaw_T_20[31:0]; // @[UnsafeAXI4ToTL.scala:177:34, :180:{24,57}] wire _wOut_ready_T; // @[Arbiter.scala:94:31] wire _wOut_valid_T_1; // @[UnsafeAXI4ToTL.scala:199:48] wire [3:0] wOut_bits_a_size; // @[Edges.scala:500:17] wire [5:0] wOut_bits_a_source; // @[Edges.scala:500:17] wire [31:0] wOut_bits_a_address; // @[Edges.scala:500:17] wire [3:0] wOut_bits_size; // @[UnsafeAXI4ToTL.scala:182:25] wire [5:0] wOut_bits_source; // @[UnsafeAXI4ToTL.scala:182:25] wire [31:0] wOut_bits_address; // @[UnsafeAXI4ToTL.scala:182:25] wire [7:0] wOut_bits_mask; // @[UnsafeAXI4ToTL.scala:182:25] wire [63:0] wOut_bits_data; // @[UnsafeAXI4ToTL.scala:182:25] wire wOut_ready; // @[UnsafeAXI4ToTL.scala:182:25] wire wOut_valid; // @[UnsafeAXI4ToTL.scala:182:25] wire [15:0] _wBytes1_T = {nodeIn_aw_bits_len, 8'hFF}; // @[Bundles.scala:33:9] wire [22:0] _wBytes1_T_1 = {7'h0, _wBytes1_T} << nodeIn_aw_bits_size; // @[Bundles.scala:33:{9,21}] wire [14:0] wBytes1 = _wBytes1_T_1[22:8]; // @[Bundles.scala:33:{21,30}] wire [15:0] _wSize_T = {wBytes1, 1'h0}; // @[package.scala:241:35] wire [15:0] _wSize_T_1 = {_wSize_T[15:1], 1'h1}; // @[package.scala:241:{35,40}] wire [15:0] _wSize_T_2 = {1'h0, wBytes1}; // @[package.scala:241:53] wire [15:0] _wSize_T_3 = ~_wSize_T_2; // @[package.scala:241:{49,53}] wire [15:0] 
_wSize_T_4 = _wSize_T_1 & _wSize_T_3; // @[package.scala:241:{40,47,49}] wire [7:0] wSize_hi = _wSize_T_4[15:8]; // @[OneHot.scala:30:18] wire [7:0] wSize_lo = _wSize_T_4[7:0]; // @[OneHot.scala:31:18] wire _wSize_T_5 = |wSize_hi; // @[OneHot.scala:30:18, :32:14] wire [7:0] _wSize_T_6 = wSize_hi | wSize_lo; // @[OneHot.scala:30:18, :31:18, :32:28] wire [3:0] wSize_hi_1 = _wSize_T_6[7:4]; // @[OneHot.scala:30:18, :32:28] wire [3:0] wSize_lo_1 = _wSize_T_6[3:0]; // @[OneHot.scala:31:18, :32:28] wire _wSize_T_7 = |wSize_hi_1; // @[OneHot.scala:30:18, :32:14] wire [3:0] _wSize_T_8 = wSize_hi_1 | wSize_lo_1; // @[OneHot.scala:30:18, :31:18, :32:28] wire [1:0] wSize_hi_2 = _wSize_T_8[3:2]; // @[OneHot.scala:30:18, :32:28] wire [1:0] wSize_lo_2 = _wSize_T_8[1:0]; // @[OneHot.scala:31:18, :32:28] wire _wSize_T_9 = |wSize_hi_2; // @[OneHot.scala:30:18, :32:14] wire [1:0] _wSize_T_10 = wSize_hi_2 | wSize_lo_2; // @[OneHot.scala:30:18, :31:18, :32:28] wire _wSize_T_11 = _wSize_T_10[1]; // @[OneHot.scala:32:28] wire [1:0] _wSize_T_12 = {_wSize_T_9, _wSize_T_11}; // @[OneHot.scala:32:{10,14}] wire [2:0] _wSize_T_13 = {_wSize_T_7, _wSize_T_12}; // @[OneHot.scala:32:{10,14}] wire [3:0] wSize = {_wSize_T_5, _wSize_T_13}; // @[OneHot.scala:32:{10,14}] assign wOut_bits_a_size = wSize; // @[OneHot.scala:32:10] wire _GEN_3 = wSize < 4'hD; // @[OneHot.scala:32:10] wire _wOk_T_1; // @[Parameters.scala:92:38] assign _wOk_T_1 = _GEN_3; // @[Parameters.scala:92:38] wire _wOut_bits_legal_T_1; // @[Parameters.scala:92:38] assign _wOut_bits_legal_T_1 = _GEN_3; // @[Parameters.scala:92:38] wire _wOk_T_2 = _wOk_T_1; // @[Parameters.scala:92:{33,38}] wire _wOk_T_3 = _wOk_T_2; // @[Parameters.scala:684:29] wire [31:0] _wOk_T_4 = {nodeIn_aw_bits_addr[31:14], nodeIn_aw_bits_addr[13:0] ^ 14'h3000}; // @[Parameters.scala:137:31] wire [32:0] _wOk_T_5 = {1'h0, _wOk_T_4}; // @[Parameters.scala:137:{31,41}] wire [32:0] _wOk_T_6 = _wOk_T_5 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}] wire [32:0] _wOk_T_7 = _wOk_T_6; // @[Parameters.scala:137:46] wire _wOk_T_8 = _wOk_T_7 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _wOk_T_9 = _wOk_T_3 & _wOk_T_8; // @[Parameters.scala:684:{29,54}] wire _wOk_T_69 = _wOk_T_9; // @[Parameters.scala:684:54, :686:26] wire _GEN_4 = wSize < 4'h7; // @[OneHot.scala:32:10] wire _wOk_T_11; // @[Parameters.scala:92:38] assign _wOk_T_11 = _GEN_4; // @[Parameters.scala:92:38] wire _wOut_bits_legal_T_11; // @[Parameters.scala:92:38] assign _wOut_bits_legal_T_11 = _GEN_4; // @[Parameters.scala:92:38] wire _wOk_T_12 = _wOk_T_11; // @[Parameters.scala:92:{33,38}] wire _wOk_T_13 = _wOk_T_12; // @[Parameters.scala:684:29] wire [32:0] _wOk_T_15 = {1'h0, _wOk_T_14}; // @[Parameters.scala:137:{31,41}] wire [32:0] _wOk_T_16 = _wOk_T_15 & 33'h1FFFFE000; // @[Parameters.scala:137:{41,46}] wire [32:0] _wOk_T_17 = _wOk_T_16; // @[Parameters.scala:137:46] wire _wOk_T_18 = _wOk_T_17 == 33'h0; // @[Parameters.scala:137:{46,59}] wire [31:0] _wOk_T_19 = {nodeIn_aw_bits_addr[31:21], nodeIn_aw_bits_addr[20:0] ^ 21'h100000}; // @[Parameters.scala:137:31] wire [32:0] _wOk_T_20 = {1'h0, _wOk_T_19}; // @[Parameters.scala:137:{31,41}] wire [32:0] _wOk_T_21 = _wOk_T_20 & 33'h1FFFEF000; // @[Parameters.scala:137:{41,46}] wire [32:0] _wOk_T_22 = _wOk_T_21; // @[Parameters.scala:137:46] wire _wOk_T_23 = _wOk_T_22 == 33'h0; // @[Parameters.scala:137:{46,59}] wire [31:0] _wOk_T_24 = {nodeIn_aw_bits_addr[31:26], nodeIn_aw_bits_addr[25:0] ^ 26'h2000000}; // @[Parameters.scala:137:31] wire [32:0] _wOk_T_25 = {1'h0, _wOk_T_24}; // 
@[Parameters.scala:137:{31,41}] wire [32:0] _wOk_T_26 = _wOk_T_25 & 33'h1FFFF0000; // @[Parameters.scala:137:{41,46}] wire [32:0] _wOk_T_27 = _wOk_T_26; // @[Parameters.scala:137:46] wire _wOk_T_28 = _wOk_T_27 == 33'h0; // @[Parameters.scala:137:{46,59}] wire [31:0] _wOk_T_29 = {nodeIn_aw_bits_addr[31:26], nodeIn_aw_bits_addr[25:0] ^ 26'h2010000}; // @[Parameters.scala:137:31] wire [32:0] _wOk_T_30 = {1'h0, _wOk_T_29}; // @[Parameters.scala:137:{31,41}] wire [32:0] _wOk_T_31 = _wOk_T_30 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}] wire [32:0] _wOk_T_32 = _wOk_T_31; // @[Parameters.scala:137:46] wire _wOk_T_33 = _wOk_T_32 == 33'h0; // @[Parameters.scala:137:{46,59}] wire [31:0] _wOk_T_34 = {nodeIn_aw_bits_addr[31:28], nodeIn_aw_bits_addr[27:0] ^ 28'h8000000}; // @[Parameters.scala:137:31] wire [32:0] _wOk_T_35 = {1'h0, _wOk_T_34}; // @[Parameters.scala:137:{31,41}] wire [32:0] _wOk_T_36 = _wOk_T_35 & 33'h1FFFF0000; // @[Parameters.scala:137:{41,46}] wire [32:0] _wOk_T_37 = _wOk_T_36; // @[Parameters.scala:137:46] wire _wOk_T_38 = _wOk_T_37 == 33'h0; // @[Parameters.scala:137:{46,59}] wire [31:0] _wOk_T_39 = {nodeIn_aw_bits_addr[31:28], nodeIn_aw_bits_addr[27:0] ^ 28'hC000000}; // @[Parameters.scala:137:31] wire [32:0] _wOk_T_40 = {1'h0, _wOk_T_39}; // @[Parameters.scala:137:{31,41}] wire [32:0] _wOk_T_41 = _wOk_T_40 & 33'h1FC000000; // @[Parameters.scala:137:{41,46}] wire [32:0] _wOk_T_42 = _wOk_T_41; // @[Parameters.scala:137:46] wire _wOk_T_43 = _wOk_T_42 == 33'h0; // @[Parameters.scala:137:{46,59}] wire [31:0] _wOk_T_44 = {nodeIn_aw_bits_addr[31:29], nodeIn_aw_bits_addr[28:0] ^ 29'h10020000}; // @[Parameters.scala:137:31] wire [32:0] _wOk_T_45 = {1'h0, _wOk_T_44}; // @[Parameters.scala:137:{31,41}] wire [32:0] _wOk_T_46 = _wOk_T_45 & 33'h1FFFFF000; // @[Parameters.scala:137:{41,46}] wire [32:0] _wOk_T_47 = _wOk_T_46; // @[Parameters.scala:137:46] wire _wOk_T_48 = _wOk_T_47 == 33'h0; // @[Parameters.scala:137:{46,59}] wire [31:0] _wOk_T_49 = nodeIn_aw_bits_addr ^ 32'h80000000; // @[Parameters.scala:137:31] wire [32:0] _wOk_T_50 = {1'h0, _wOk_T_49}; // @[Parameters.scala:137:{31,41}] wire [32:0] _wOk_T_51 = _wOk_T_50 & 33'h1F0000000; // @[Parameters.scala:137:{41,46}] wire [32:0] _wOk_T_52 = _wOk_T_51; // @[Parameters.scala:137:46] wire _wOk_T_53 = _wOk_T_52 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _wOk_T_54 = _wOk_T_18 | _wOk_T_23; // @[Parameters.scala:685:42] wire _wOk_T_55 = _wOk_T_54 | _wOk_T_28; // @[Parameters.scala:685:42] wire _wOk_T_56 = _wOk_T_55 | _wOk_T_33; // @[Parameters.scala:685:42] wire _wOk_T_57 = _wOk_T_56 | _wOk_T_38; // @[Parameters.scala:685:42] wire _wOk_T_58 = _wOk_T_57 | _wOk_T_43; // @[Parameters.scala:685:42] wire _wOk_T_59 = _wOk_T_58 | _wOk_T_48; // @[Parameters.scala:685:42] wire _wOk_T_60 = _wOk_T_59 | _wOk_T_53; // @[Parameters.scala:685:42] wire _wOk_T_61 = _wOk_T_13 & _wOk_T_60; // @[Parameters.scala:684:{29,54}, :685:42] wire [31:0] _wOk_T_63 = {nodeIn_aw_bits_addr[31:17], nodeIn_aw_bits_addr[16:0] ^ 17'h10000}; // @[Parameters.scala:137:31] wire [32:0] _wOk_T_64 = {1'h0, _wOk_T_63}; // @[Parameters.scala:137:{31,41}] wire [32:0] _wOk_T_65 = _wOk_T_64 & 33'h1FFFF0000; // @[Parameters.scala:137:{41,46}] wire [32:0] _wOk_T_66 = _wOk_T_65; // @[Parameters.scala:137:46] wire _wOk_T_67 = _wOk_T_66 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _wOk_T_70 = _wOk_T_69 | _wOk_T_61; // @[Parameters.scala:684:54, :686:26] wire wOk = _wOk_T_70; // @[Parameters.scala:686:26] wire [5:0] wId = {1'h1, freeWriteIdIndex}; // @[OneHot.scala:32:10] assign 
wOut_bits_a_source = wId; // @[Edges.scala:500:17] wire [2:0] _wAddr_T = nodeIn_aw_bits_addr[2:0]; // @[MixedNode.scala:551:17] wire [13:0] _wAddr_T_1 = {11'h600, _wAddr_T}; // @[UnsafeAXI4ToTL.scala:191:{60,77}] wire [31:0] wAddr = wOk ? nodeIn_aw_bits_addr : {18'h0, _wAddr_T_1}; // @[Parameters.scala:686:26] wire [31:0] _wOut_bits_legal_T_14 = wAddr; // @[Parameters.scala:137:31] assign wOut_bits_a_address = wAddr; // @[Edges.scala:500:17] wire _nodeIn_aw_ready_T = wOut_ready & nodeIn_w_valid; // @[MixedNode.scala:551:17] wire _nodeIn_aw_ready_T_1 = _nodeIn_aw_ready_T & nodeIn_w_bits_last; // @[MixedNode.scala:551:17] assign _nodeIn_aw_ready_T_2 = _nodeIn_aw_ready_T_1 & canIssueW; // @[UnsafeAXI4ToTL.scala:168:26, :197:{47,65}] assign nodeIn_aw_ready = _nodeIn_aw_ready_T_2; // @[MixedNode.scala:551:17] wire _nodeIn_w_ready_T = wOut_ready & nodeIn_aw_valid; // @[MixedNode.scala:551:17] assign _nodeIn_w_ready_T_1 = _nodeIn_w_ready_T & canIssueW; // @[UnsafeAXI4ToTL.scala:168:26, :198:{33,48}] assign nodeIn_w_ready = _nodeIn_w_ready_T_1; // @[MixedNode.scala:551:17] wire _wOut_valid_T = nodeIn_aw_valid & nodeIn_w_valid; // @[MixedNode.scala:551:17] assign _wOut_valid_T_1 = _wOut_valid_T & canIssueW; // @[UnsafeAXI4ToTL.scala:168:26, :199:{34,48}] assign wOut_valid = _wOut_valid_T_1; // @[UnsafeAXI4ToTL.scala:182:25, :199:48] wire _wOut_bits_legal_T_2 = _wOut_bits_legal_T_1; // @[Parameters.scala:92:{33,38}] wire _wOut_bits_legal_T_3 = _wOut_bits_legal_T_2; // @[Parameters.scala:684:29] wire [31:0] _wOut_bits_legal_T_4 = {wAddr[31:14], wAddr[13:0] ^ 14'h3000}; // @[Parameters.scala:137:31] wire [32:0] _wOut_bits_legal_T_5 = {1'h0, _wOut_bits_legal_T_4}; // @[Parameters.scala:137:{31,41}] wire [32:0] _wOut_bits_legal_T_6 = _wOut_bits_legal_T_5 & 33'h9A113000; // @[Parameters.scala:137:{41,46}] wire [32:0] _wOut_bits_legal_T_7 = _wOut_bits_legal_T_6; // @[Parameters.scala:137:46] wire _wOut_bits_legal_T_8 = _wOut_bits_legal_T_7 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _wOut_bits_legal_T_9 = _wOut_bits_legal_T_3 & _wOut_bits_legal_T_8; // @[Parameters.scala:684:{29,54}] wire _wOut_bits_legal_T_69 = _wOut_bits_legal_T_9; // @[Parameters.scala:684:54, :686:26] wire _wOut_bits_legal_T_12 = _wOut_bits_legal_T_11; // @[Parameters.scala:92:{33,38}] wire _wOut_bits_legal_T_13 = _wOut_bits_legal_T_12; // @[Parameters.scala:684:29] wire [32:0] _wOut_bits_legal_T_15 = {1'h0, _wOut_bits_legal_T_14}; // @[Parameters.scala:137:{31,41}] wire [32:0] _wOut_bits_legal_T_16 = _wOut_bits_legal_T_15 & 33'h9A112000; // @[Parameters.scala:137:{41,46}] wire [32:0] _wOut_bits_legal_T_17 = _wOut_bits_legal_T_16; // @[Parameters.scala:137:46] wire _wOut_bits_legal_T_18 = _wOut_bits_legal_T_17 == 33'h0; // @[Parameters.scala:137:{46,59}] wire [31:0] _wOut_bits_legal_T_19 = {wAddr[31:21], wAddr[20:0] ^ 21'h100000}; // @[Parameters.scala:137:31] wire [32:0] _wOut_bits_legal_T_20 = {1'h0, _wOut_bits_legal_T_19}; // @[Parameters.scala:137:{31,41}] wire [32:0] _wOut_bits_legal_T_21 = _wOut_bits_legal_T_20 & 33'h9A103000; // @[Parameters.scala:137:{41,46}] wire [32:0] _wOut_bits_legal_T_22 = _wOut_bits_legal_T_21; // @[Parameters.scala:137:46] wire _wOut_bits_legal_T_23 = _wOut_bits_legal_T_22 == 33'h0; // @[Parameters.scala:137:{46,59}] wire [31:0] _wOut_bits_legal_T_24 = {wAddr[31:26], wAddr[25:0] ^ 26'h2000000}; // @[Parameters.scala:137:31] wire [32:0] _wOut_bits_legal_T_25 = {1'h0, _wOut_bits_legal_T_24}; // @[Parameters.scala:137:{31,41}] wire [32:0] _wOut_bits_legal_T_26 = _wOut_bits_legal_T_25 & 33'h9A110000; 
// @[Parameters.scala:137:{41,46}] wire [32:0] _wOut_bits_legal_T_27 = _wOut_bits_legal_T_26; // @[Parameters.scala:137:46] wire _wOut_bits_legal_T_28 = _wOut_bits_legal_T_27 == 33'h0; // @[Parameters.scala:137:{46,59}] wire [31:0] _wOut_bits_legal_T_29 = {wAddr[31:26], wAddr[25:0] ^ 26'h2010000}; // @[Parameters.scala:137:31] wire [32:0] _wOut_bits_legal_T_30 = {1'h0, _wOut_bits_legal_T_29}; // @[Parameters.scala:137:{31,41}] wire [32:0] _wOut_bits_legal_T_31 = _wOut_bits_legal_T_30 & 33'h9A113000; // @[Parameters.scala:137:{41,46}] wire [32:0] _wOut_bits_legal_T_32 = _wOut_bits_legal_T_31; // @[Parameters.scala:137:46] wire _wOut_bits_legal_T_33 = _wOut_bits_legal_T_32 == 33'h0; // @[Parameters.scala:137:{46,59}] wire [31:0] _GEN_5 = {wAddr[31:28], wAddr[27:0] ^ 28'h8000000}; // @[Parameters.scala:137:31] wire [31:0] _wOut_bits_legal_T_34; // @[Parameters.scala:137:31] assign _wOut_bits_legal_T_34 = _GEN_5; // @[Parameters.scala:137:31] wire [31:0] _wOut_bits_legal_T_39; // @[Parameters.scala:137:31] assign _wOut_bits_legal_T_39 = _GEN_5; // @[Parameters.scala:137:31] wire [32:0] _wOut_bits_legal_T_35 = {1'h0, _wOut_bits_legal_T_34}; // @[Parameters.scala:137:{31,41}] wire [32:0] _wOut_bits_legal_T_36 = _wOut_bits_legal_T_35 & 33'h98000000; // @[Parameters.scala:137:{41,46}] wire [32:0] _wOut_bits_legal_T_37 = _wOut_bits_legal_T_36; // @[Parameters.scala:137:46] wire _wOut_bits_legal_T_38 = _wOut_bits_legal_T_37 == 33'h0; // @[Parameters.scala:137:{46,59}] wire [32:0] _wOut_bits_legal_T_40 = {1'h0, _wOut_bits_legal_T_39}; // @[Parameters.scala:137:{31,41}] wire [32:0] _wOut_bits_legal_T_41 = _wOut_bits_legal_T_40 & 33'h9A110000; // @[Parameters.scala:137:{41,46}] wire [32:0] _wOut_bits_legal_T_42 = _wOut_bits_legal_T_41; // @[Parameters.scala:137:46] wire _wOut_bits_legal_T_43 = _wOut_bits_legal_T_42 == 33'h0; // @[Parameters.scala:137:{46,59}] wire [31:0] _wOut_bits_legal_T_44 = {wAddr[31:29], wAddr[28:0] ^ 29'h10000000}; // @[Parameters.scala:137:31] wire [32:0] _wOut_bits_legal_T_45 = {1'h0, _wOut_bits_legal_T_44}; // @[Parameters.scala:137:{31,41}] wire [32:0] _wOut_bits_legal_T_46 = _wOut_bits_legal_T_45 & 33'h9A113000; // @[Parameters.scala:137:{41,46}] wire [32:0] _wOut_bits_legal_T_47 = _wOut_bits_legal_T_46; // @[Parameters.scala:137:46] wire _wOut_bits_legal_T_48 = _wOut_bits_legal_T_47 == 33'h0; // @[Parameters.scala:137:{46,59}] wire [31:0] _wOut_bits_legal_T_49 = wAddr ^ 32'h80000000; // @[Parameters.scala:137:31] wire [32:0] _wOut_bits_legal_T_50 = {1'h0, _wOut_bits_legal_T_49}; // @[Parameters.scala:137:{31,41}] wire [32:0] _wOut_bits_legal_T_51 = _wOut_bits_legal_T_50 & 33'h90000000; // @[Parameters.scala:137:{41,46}] wire [32:0] _wOut_bits_legal_T_52 = _wOut_bits_legal_T_51; // @[Parameters.scala:137:46] wire _wOut_bits_legal_T_53 = _wOut_bits_legal_T_52 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _wOut_bits_legal_T_54 = _wOut_bits_legal_T_18 | _wOut_bits_legal_T_23; // @[Parameters.scala:685:42] wire _wOut_bits_legal_T_55 = _wOut_bits_legal_T_54 | _wOut_bits_legal_T_28; // @[Parameters.scala:685:42] wire _wOut_bits_legal_T_56 = _wOut_bits_legal_T_55 | _wOut_bits_legal_T_33; // @[Parameters.scala:685:42] wire _wOut_bits_legal_T_57 = _wOut_bits_legal_T_56 | _wOut_bits_legal_T_38; // @[Parameters.scala:685:42] wire _wOut_bits_legal_T_58 = _wOut_bits_legal_T_57 | _wOut_bits_legal_T_43; // @[Parameters.scala:685:42] wire _wOut_bits_legal_T_59 = _wOut_bits_legal_T_58 | _wOut_bits_legal_T_48; // @[Parameters.scala:685:42] wire _wOut_bits_legal_T_60 = 
_wOut_bits_legal_T_59 | _wOut_bits_legal_T_53; // @[Parameters.scala:685:42] wire _wOut_bits_legal_T_61 = _wOut_bits_legal_T_13 & _wOut_bits_legal_T_60; // @[Parameters.scala:684:{29,54}, :685:42] wire [31:0] _wOut_bits_legal_T_63 = {wAddr[31:17], wAddr[16:0] ^ 17'h10000}; // @[Parameters.scala:137:31] wire [32:0] _wOut_bits_legal_T_64 = {1'h0, _wOut_bits_legal_T_63}; // @[Parameters.scala:137:{31,41}] wire [32:0] _wOut_bits_legal_T_65 = _wOut_bits_legal_T_64 & 33'h9A110000; // @[Parameters.scala:137:{41,46}] wire [32:0] _wOut_bits_legal_T_66 = _wOut_bits_legal_T_65; // @[Parameters.scala:137:46] wire _wOut_bits_legal_T_67 = _wOut_bits_legal_T_66 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _wOut_bits_legal_T_70 = _wOut_bits_legal_T_69 | _wOut_bits_legal_T_61; // @[Parameters.scala:684:54, :686:26] wire wOut_bits_legal = _wOut_bits_legal_T_70; // @[Parameters.scala:686:26] assign wOut_bits_size = wOut_bits_a_size; // @[Edges.scala:500:17] assign wOut_bits_source = wOut_bits_a_source; // @[Edges.scala:500:17] assign wOut_bits_address = wOut_bits_a_address; // @[Edges.scala:500:17] assign wOut_bits_mask = wOut_bits_a_mask; // @[Edges.scala:500:17] assign wOut_bits_data = wOut_bits_a_data; // @[Edges.scala:500:17] reg [7:0] beatsLeft; // @[Arbiter.scala:60:30] wire idle = beatsLeft == 8'h0; // @[Arbiter.scala:60:30, :61:28] wire latch = idle & nodeOut_a_ready; // @[Arbiter.scala:61:28, :62:24] wire [1:0] _readys_T = {wOut_valid, rOut_valid}; // @[Arbiter.scala:68:51] wire [1:0] readys_valid = _readys_T; // @[Arbiter.scala:21:23, :68:51] wire _readys_T_1 = readys_valid == _readys_T; // @[Arbiter.scala:21:23, :22:19, :68:51] wire _readys_T_3 = ~_readys_T_2; // @[Arbiter.scala:22:12] wire _readys_T_4 = ~_readys_T_1; // @[Arbiter.scala:22:{12,19}] reg [1:0] readys_mask; // @[Arbiter.scala:23:23] wire [1:0] _readys_filter_T = ~readys_mask; // @[Arbiter.scala:23:23, :24:30] wire [1:0] _readys_filter_T_1 = readys_valid & _readys_filter_T; // @[Arbiter.scala:21:23, :24:{28,30}] wire [3:0] readys_filter = {_readys_filter_T_1, readys_valid}; // @[Arbiter.scala:21:23, :24:{21,28}] wire [2:0] _readys_unready_T = readys_filter[3:1]; // @[package.scala:262:48] wire [3:0] _readys_unready_T_1 = {readys_filter[3], readys_filter[2:0] | _readys_unready_T}; // @[package.scala:262:{43,48}] wire [3:0] _readys_unready_T_2 = _readys_unready_T_1; // @[package.scala:262:43, :263:17] wire [2:0] _readys_unready_T_3 = _readys_unready_T_2[3:1]; // @[package.scala:263:17] wire [3:0] _readys_unready_T_4 = {readys_mask, 2'h0}; // @[Arbiter.scala:23:23, :25:66] wire [3:0] readys_unready = {1'h0, _readys_unready_T_3} | _readys_unready_T_4; // @[Arbiter.scala:25:{52,58,66}] wire [1:0] _readys_readys_T = readys_unready[3:2]; // @[Arbiter.scala:25:58, :26:29] wire [1:0] _readys_readys_T_1 = readys_unready[1:0]; // @[Arbiter.scala:25:58, :26:48] wire [1:0] _readys_readys_T_2 = _readys_readys_T & _readys_readys_T_1; // @[Arbiter.scala:26:{29,39,48}] wire [1:0] readys_readys = ~_readys_readys_T_2; // @[Arbiter.scala:26:{18,39}] wire [1:0] _readys_T_7 = readys_readys; // @[Arbiter.scala:26:18, :30:11] wire _readys_T_5 = |readys_valid; // @[Arbiter.scala:21:23, :27:27] wire _readys_T_6 = latch & _readys_T_5; // @[Arbiter.scala:27:{18,27}, :62:24] wire [1:0] _readys_mask_T = readys_readys & readys_valid; // @[Arbiter.scala:21:23, :26:18, :28:29] wire [2:0] _readys_mask_T_1 = {_readys_mask_T, 1'h0}; // @[package.scala:253:48] wire [1:0] _readys_mask_T_2 = _readys_mask_T_1[1:0]; // @[package.scala:253:{48,53}] wire [1:0] 
_readys_mask_T_3 = _readys_mask_T | _readys_mask_T_2; // @[package.scala:253:{43,53}] wire [1:0] _readys_mask_T_4 = _readys_mask_T_3; // @[package.scala:253:43, :254:17] wire _readys_T_8 = _readys_T_7[0]; // @[Arbiter.scala:30:11, :68:76] wire readys_0 = _readys_T_8; // @[Arbiter.scala:68:{27,76}] wire _readys_T_9 = _readys_T_7[1]; // @[Arbiter.scala:30:11, :68:76] wire readys_1 = _readys_T_9; // @[Arbiter.scala:68:{27,76}] wire _winner_T = readys_0 & rOut_valid; // @[Arbiter.scala:68:27, :71:69] wire winner_0 = _winner_T; // @[Arbiter.scala:71:{27,69}] wire _winner_T_1 = readys_1 & wOut_valid; // @[Arbiter.scala:68:27, :71:69] wire winner_1 = _winner_T_1; // @[Arbiter.scala:71:{27,69}] wire prefixOR_1 = winner_0; // @[Arbiter.scala:71:27, :76:48] wire _prefixOR_T = prefixOR_1 | winner_1; // @[Arbiter.scala:71:27, :76:48] wire _nodeOut_a_valid_T = rOut_valid | wOut_valid; // @[Arbiter.scala:79:31, :96:46] wire [7:0] maskedBeats_1 = winner_1 ? nodeIn_aw_bits_len : 8'h0; // @[Arbiter.scala:71:27, :82:69] wire [7:0] initBeats = maskedBeats_1; // @[Arbiter.scala:82:69, :84:44] wire _beatsLeft_T = nodeOut_a_ready & nodeOut_a_valid; // @[Decoupled.scala:51:35] wire [8:0] _beatsLeft_T_1 = {1'h0, beatsLeft} - {8'h0, _beatsLeft_T}; // @[Decoupled.scala:51:35] wire [7:0] _beatsLeft_T_2 = _beatsLeft_T_1[7:0]; // @[Arbiter.scala:85:52] wire [7:0] _beatsLeft_T_3 = latch ? initBeats : _beatsLeft_T_2; // @[Arbiter.scala:62:24, :84:44, :85:{23,52}] reg state_0; // @[Arbiter.scala:88:26] reg state_1; // @[Arbiter.scala:88:26] wire muxState_0 = idle ? winner_0 : state_0; // @[Arbiter.scala:61:28, :71:27, :88:26, :89:25] wire muxState_1 = idle ? winner_1 : state_1; // @[Arbiter.scala:61:28, :71:27, :88:26, :89:25] wire allowed_0 = idle ? readys_0 : state_0; // @[Arbiter.scala:61:28, :68:27, :88:26, :92:24] wire allowed_1 = idle ? readys_1 : state_1; // @[Arbiter.scala:61:28, :68:27, :88:26, :92:24] assign _rOut_ready_T = nodeOut_a_ready & allowed_0; // @[Arbiter.scala:92:24, :94:31] assign rOut_ready = _rOut_ready_T; // @[Arbiter.scala:94:31] assign _wOut_ready_T = nodeOut_a_ready & allowed_1; // @[Arbiter.scala:92:24, :94:31] assign wOut_ready = _wOut_ready_T; // @[Arbiter.scala:94:31] wire _nodeOut_a_valid_T_1 = state_0 & rOut_valid; // @[Mux.scala:30:73] wire _nodeOut_a_valid_T_2 = state_1 & wOut_valid; // @[Mux.scala:30:73] wire _nodeOut_a_valid_T_3 = _nodeOut_a_valid_T_1 | _nodeOut_a_valid_T_2; // @[Mux.scala:30:73] wire _nodeOut_a_valid_WIRE = _nodeOut_a_valid_T_3; // @[Mux.scala:30:73] assign _nodeOut_a_valid_T_4 = idle ? 
_nodeOut_a_valid_T : _nodeOut_a_valid_WIRE; // @[Mux.scala:30:73] assign nodeOut_a_valid = _nodeOut_a_valid_T_4; // @[Arbiter.scala:96:24] wire [2:0] _nodeOut_a_bits_WIRE_10; // @[Mux.scala:30:73] assign nodeOut_a_bits_opcode = _nodeOut_a_bits_WIRE_opcode; // @[Mux.scala:30:73] wire [3:0] _nodeOut_a_bits_WIRE_8; // @[Mux.scala:30:73] assign nodeOut_a_bits_size = _nodeOut_a_bits_WIRE_size; // @[Mux.scala:30:73] wire [5:0] _nodeOut_a_bits_WIRE_7; // @[Mux.scala:30:73] assign nodeOut_a_bits_source = _nodeOut_a_bits_WIRE_source; // @[Mux.scala:30:73] wire [31:0] _nodeOut_a_bits_WIRE_6; // @[Mux.scala:30:73] assign nodeOut_a_bits_address = _nodeOut_a_bits_WIRE_address; // @[Mux.scala:30:73] wire [7:0] _nodeOut_a_bits_WIRE_3; // @[Mux.scala:30:73] assign nodeOut_a_bits_mask = _nodeOut_a_bits_WIRE_mask; // @[Mux.scala:30:73] wire [63:0] _nodeOut_a_bits_WIRE_2; // @[Mux.scala:30:73] assign nodeOut_a_bits_data = _nodeOut_a_bits_WIRE_data; // @[Mux.scala:30:73] wire [63:0] _nodeOut_a_bits_T_4 = muxState_1 ? wOut_bits_data : 64'h0; // @[Mux.scala:30:73] wire [63:0] _nodeOut_a_bits_T_5 = _nodeOut_a_bits_T_4; // @[Mux.scala:30:73] assign _nodeOut_a_bits_WIRE_2 = _nodeOut_a_bits_T_5; // @[Mux.scala:30:73] assign _nodeOut_a_bits_WIRE_data = _nodeOut_a_bits_WIRE_2; // @[Mux.scala:30:73] wire [7:0] _nodeOut_a_bits_T_6 = muxState_0 ? rOut_bits_mask : 8'h0; // @[Mux.scala:30:73] wire [7:0] _nodeOut_a_bits_T_7 = muxState_1 ? wOut_bits_mask : 8'h0; // @[Mux.scala:30:73] wire [7:0] _nodeOut_a_bits_T_8 = _nodeOut_a_bits_T_6 | _nodeOut_a_bits_T_7; // @[Mux.scala:30:73] assign _nodeOut_a_bits_WIRE_3 = _nodeOut_a_bits_T_8; // @[Mux.scala:30:73] assign _nodeOut_a_bits_WIRE_mask = _nodeOut_a_bits_WIRE_3; // @[Mux.scala:30:73] wire [31:0] _nodeOut_a_bits_T_9 = muxState_0 ? rOut_bits_address : 32'h0; // @[Mux.scala:30:73] wire [31:0] _nodeOut_a_bits_T_10 = muxState_1 ? wOut_bits_address : 32'h0; // @[Mux.scala:30:73] wire [31:0] _nodeOut_a_bits_T_11 = _nodeOut_a_bits_T_9 | _nodeOut_a_bits_T_10; // @[Mux.scala:30:73] assign _nodeOut_a_bits_WIRE_6 = _nodeOut_a_bits_T_11; // @[Mux.scala:30:73] assign _nodeOut_a_bits_WIRE_address = _nodeOut_a_bits_WIRE_6; // @[Mux.scala:30:73] wire [5:0] _nodeOut_a_bits_T_12 = muxState_0 ? rOut_bits_source : 6'h0; // @[Mux.scala:30:73] wire [5:0] _nodeOut_a_bits_T_13 = muxState_1 ? wOut_bits_source : 6'h0; // @[Mux.scala:30:73] wire [5:0] _nodeOut_a_bits_T_14 = _nodeOut_a_bits_T_12 | _nodeOut_a_bits_T_13; // @[Mux.scala:30:73] assign _nodeOut_a_bits_WIRE_7 = _nodeOut_a_bits_T_14; // @[Mux.scala:30:73] assign _nodeOut_a_bits_WIRE_source = _nodeOut_a_bits_WIRE_7; // @[Mux.scala:30:73] wire [3:0] _nodeOut_a_bits_T_15 = muxState_0 ? rOut_bits_size : 4'h0; // @[Mux.scala:30:73] wire [3:0] _nodeOut_a_bits_T_16 = muxState_1 ? 
wOut_bits_size : 4'h0; // @[Mux.scala:30:73] wire [3:0] _nodeOut_a_bits_T_17 = _nodeOut_a_bits_T_15 | _nodeOut_a_bits_T_16; // @[Mux.scala:30:73] assign _nodeOut_a_bits_WIRE_8 = _nodeOut_a_bits_T_17; // @[Mux.scala:30:73] assign _nodeOut_a_bits_WIRE_size = _nodeOut_a_bits_WIRE_8; // @[Mux.scala:30:73] wire [2:0] _nodeOut_a_bits_T_21 = {muxState_0, 2'h0}; // @[Mux.scala:30:73] wire [2:0] _nodeOut_a_bits_T_22 = {2'h0, muxState_1}; // @[Mux.scala:30:73] wire [2:0] _nodeOut_a_bits_T_23 = _nodeOut_a_bits_T_21 | _nodeOut_a_bits_T_22; // @[Mux.scala:30:73] assign _nodeOut_a_bits_WIRE_10 = _nodeOut_a_bits_T_23; // @[Mux.scala:30:73] assign _nodeOut_a_bits_WIRE_opcode = _nodeOut_a_bits_WIRE_10; // @[Mux.scala:30:73] wire _okB_valid_T_3; // @[UnsafeAXI4ToTL.scala:234:72] wire [1:0] dResp; // @[UnsafeAXI4ToTL.scala:222:25] wire [3:0] okB_bits_id; // @[UnsafeAXI4ToTL.scala:219:21] wire [1:0] okB_bits_resp; // @[UnsafeAXI4ToTL.scala:219:21] wire okB_ready; // @[UnsafeAXI4ToTL.scala:219:21] wire okB_valid; // @[UnsafeAXI4ToTL.scala:219:21] wire [3:0] okR_bits_id; // @[UnsafeAXI4ToTL.scala:220:21] wire [63:0] okR_bits_data; // @[UnsafeAXI4ToTL.scala:220:21] wire [1:0] okR_bits_resp; // @[UnsafeAXI4ToTL.scala:220:21] wire okR_bits_last; // @[UnsafeAXI4ToTL.scala:220:21] wire okR_ready; // @[UnsafeAXI4ToTL.scala:220:21] wire okR_valid; // @[UnsafeAXI4ToTL.scala:220:21] wire _dResp_T = nodeOut_d_bits_denied | nodeOut_d_bits_corrupt; // @[MixedNode.scala:542:17] assign dResp = {_dResp_T, 1'h0}; // @[UnsafeAXI4ToTL.scala:222:{25,44}] assign okB_bits_resp = dResp; // @[UnsafeAXI4ToTL.scala:219:21, :222:25] wire dHasData = nodeOut_d_bits_opcode[0]; // @[Edges.scala:106:36] wire r_beats1_opdata = nodeOut_d_bits_opcode[0]; // @[Edges.scala:106:36] wire dNumBeats1_opdata = nodeOut_d_bits_opcode[0]; // @[Edges.scala:106:36] wire _T_18 = nodeOut_d_ready & nodeOut_d_valid; // @[Decoupled.scala:51:35] wire [26:0] _GEN_6 = 27'hFFF << nodeOut_d_bits_size; // @[package.scala:243:71] wire [26:0] _r_beats1_decode_T; // @[package.scala:243:71] assign _r_beats1_decode_T = _GEN_6; // @[package.scala:243:71] wire [26:0] _dNumBeats1_decode_T; // @[package.scala:243:71] assign _dNumBeats1_decode_T = _GEN_6; // @[package.scala:243:71] wire [11:0] _r_beats1_decode_T_1 = _r_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _r_beats1_decode_T_2 = ~_r_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [8:0] r_beats1_decode = _r_beats1_decode_T_2[11:3]; // @[package.scala:243:46] wire [8:0] r_beats1 = r_beats1_opdata ? r_beats1_decode : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [8:0] r_counter; // @[Edges.scala:229:27] wire [9:0] _r_counter1_T = {1'h0, r_counter} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] r_counter1 = _r_counter1_T[8:0]; // @[Edges.scala:230:28] wire _dFirst = r_counter == 9'h0; // @[Edges.scala:229:27, :231:25] wire _r_last_T = r_counter == 9'h1; // @[Edges.scala:229:27, :232:25] wire _r_last_T_1 = r_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43] wire dLast = _r_last_T | _r_last_T_1; // @[Edges.scala:232:{25,33,43}] wire _dDone = dLast & _T_18; // @[Decoupled.scala:51:35] wire [8:0] _r_count_T = ~r_counter1; // @[Edges.scala:230:28, :234:27] wire [8:0] dCount = r_beats1 & _r_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _r_counter_T = _dFirst ? 
r_beats1 : r_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [11:0] _dNumBeats1_decode_T_1 = _dNumBeats1_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _dNumBeats1_decode_T_2 = ~_dNumBeats1_decode_T_1; // @[package.scala:243:{46,76}] wire [8:0] dNumBeats1_decode = _dNumBeats1_decode_T_2[11:3]; // @[package.scala:243:46] wire [8:0] dNumBeats1 = dNumBeats1_opdata ? dNumBeats1_decode : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14] wire [31:0] _writeEarlyAck_T = 32'h1 << strippedResponseSourceId; // @[OneHot.scala:58:35] wire [31:0] _writeEarlyAck_T_1 = _writeEarlyAck_T & usedWriteIds; // @[OneHot.scala:58:35] wire writeEarlyAck = _writeEarlyAck_T_1 == 32'h0; // @[UnsafeAXI4ToTL.scala:229:{63,79}] wire _nodeOut_d_ready_T = ~writeEarlyAck; // @[UnsafeAXI4ToTL.scala:229:79, :231:103] wire _nodeOut_d_ready_T_1 = okB_ready & _nodeOut_d_ready_T; // @[UnsafeAXI4ToTL.scala:219:21, :231:{100,103}] assign _nodeOut_d_ready_T_2 = dHasData ? _listBuffer_ioResponse_ready : _nodeOut_d_ready_T_1; // @[Edges.scala:106:36] assign nodeOut_d_ready = _nodeOut_d_ready_T_2; // @[MixedNode.scala:542:17] wire _okB_valid_T = ~dHasData; // @[Edges.scala:106:36] wire _okB_valid_T_1 = nodeOut_d_valid & _okB_valid_T; // @[MixedNode.scala:542:17] wire _okB_valid_T_2 = ~writeEarlyAck; // @[UnsafeAXI4ToTL.scala:229:79, :231:103, :234:75] assign _okB_valid_T_3 = _okB_valid_T_1 & _okB_valid_T_2; // @[UnsafeAXI4ToTL.scala:234:{59,72,75}] assign okB_valid = _okB_valid_T_3; // @[UnsafeAXI4ToTL.scala:219:21, :234:72] wire _listBuffer_ioResponse_valid_T = nodeOut_d_valid & dHasData; // @[Edges.scala:106:36] wire _T_20 = wOut_ready & wOut_valid; // @[Decoupled.scala:51:35] wire [26:0] _beats1_decode_T = 27'hFFF << wOut_bits_size; // @[package.scala:243:71] wire [11:0] _beats1_decode_T_1 = _beats1_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _beats1_decode_T_2 = ~_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [8:0] beats1_decode = _beats1_decode_T_2[11:3]; // @[package.scala:243:46] wire [8:0] beats1 = beats1_decode; // @[Edges.scala:220:59, :221:14] reg [8:0] counter; // @[Edges.scala:229:27] wire [9:0] _counter1_T = {1'h0, counter} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] counter1 = _counter1_T[8:0]; // @[Edges.scala:230:28] wire first = counter == 9'h0; // @[Edges.scala:229:27, :231:25] wire _last_T = counter == 9'h1; // @[Edges.scala:229:27, :232:25] wire _last_T_1 = beats1 == 9'h0; // @[Edges.scala:221:14, :232:43] wire last = _last_T | _last_T_1; // @[Edges.scala:232:{25,33,43}] wire done = last & _T_20; // @[Decoupled.scala:51:35] wire [8:0] _count_T = ~counter1; // @[Edges.scala:230:28, :234:27] wire [8:0] count = beats1 & _count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _counter_T = first ? beats1 : counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] assign usedWriteIdsSet = done ? freeWriteIdOH : 32'h0; // @[package.scala:88:42] wire [31:0] _usedWriteIdsClr_T = 32'h1 << usedWriteIdsClr_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [31:0] _usedWriteIdsClr_T_1 = _usedWriteIdsClr_T; // @[OneHot.scala:65:{12,27}] assign usedWriteIdsClr = okB_ready & okB_valid ? 
_usedWriteIdsClr_T_1 : 32'h0; // @[OneHot.scala:65:27] assign nodeIn_r_valid = nodeIn_r_irr_valid; // @[Decoupled.scala:401:19] assign nodeIn_r_bits_id = nodeIn_r_irr_bits_id; // @[Decoupled.scala:401:19] assign nodeIn_r_bits_data = nodeIn_r_irr_bits_data; // @[Decoupled.scala:401:19] assign nodeIn_r_bits_resp = nodeIn_r_irr_bits_resp; // @[Decoupled.scala:401:19] assign nodeIn_r_bits_last = nodeIn_r_irr_bits_last; // @[Decoupled.scala:401:19] assign nodeIn_b_valid = nodeIn_b_irr_valid; // @[Decoupled.scala:401:19] assign nodeIn_b_bits_id = nodeIn_b_irr_bits_id; // @[Decoupled.scala:401:19] assign nodeIn_b_bits_resp = nodeIn_b_irr_bits_resp; // @[Decoupled.scala:401:19]
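Note: the readys_* wires above implement a standard round-robin arbitration between the read (rOut) and write (wOut) request streams, and the freeWriteIdOHRaw logic is the related lowest-set-bit pick (effectively PriorityEncoderOH(~usedWriteIds)). The self-contained Chisel sketch below only illustrates that round-robin ready computation under assumed names; it mirrors the usual rocket-chip-style formulation rather than the exact source that produced this module.

import chisel3._
import chisel3.util._

// Round-robin ready generation: `mask` remembers the last winner; concatenating the
// request vector with "requests after the winner" lets one rightward OR-scan wrap around.
class RoundRobinReadys(n: Int) extends Module {
  val io = IO(new Bundle {
    val valids = Input(UInt(n.W))   // request bits (e.g. {wOut.valid, rOut.valid})
    val latch  = Input(Bool())      // arbiter is idle and the sink is ready this cycle
    val readys = Output(UInt(n.W))  // which requesters may fire (AND with valids picks the winner)
  })

  // OR-scan propagating each set bit toward the LSB.
  private def rightOR(x: UInt): UInt = {
    var y = x; var s = 1
    while (s < x.getWidth) { y = y | (y >> s); s <<= 1 }
    y
  }
  // OR-scan propagating each set bit toward the MSB (kept to the input width).
  private def leftOR(x: UInt): UInt = {
    var y = x; var s = 1
    while (s < x.getWidth) { y = y | (y << s)(x.getWidth - 1, 0); s <<= 1 }
    y
  }

  val mask    = RegInit(0.U(n.W))
  val filter  = Cat(io.valids & ~mask, io.valids)            // 2n-bit wrapped view of the requests
  val unready = (rightOR(filter) >> 1) | (mask << n)         // positions beaten by an earlier request
  val readys  = ~(unready(2 * n - 1, n) & unready(n - 1, 0)) // ready iff not beaten in either half
  io.readys := readys

  when (io.latch && io.valids.orR) {
    mask := leftOR(readys & io.valids)                       // winner and earlier slots lose priority
  }
}

With n = 2 and valids = {wOut.valid, rOut.valid}, io.readys corresponds to the readys_1/readys_0 wires above.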
Generate the Verilog code corresponding to the following Chisel files.
File Dispatch.scala:
package saturn.frontend

import chisel3._
import chisel3.util._
import chisel3.experimental.dataview._
import org.chipsalliance.cde.config._
import freechips.rocketchip.tile._
import freechips.rocketchip.util._
import saturn.common._
import saturn.insns._

class VectorDispatcher(implicit p: Parameters) extends CoreModule()(p) with HasVectorParams {
  val io = IO(new Bundle {
    val issue = Flipped(Decoupled(new VectorIssueInst))
    val mem = Decoupled(new VectorMemMacroOp)
    val dis = Decoupled(new VectorIssueInst)
    val scalar_resp = Decoupled(new ScalarWrite)
    val vat_release = Input(Vec(nRelease, Valid(UInt(vParams.vatSz.W))))
    val vat_head = Output(UInt(vParams.vatSz.W))
    val vat_tail = Output(UInt(vParams.vatSz.W))
  })

  val debug_id_ctr = RegInit(0.U(debugIdSz.W))

  val vat_valids = RegInit(VecInit.fill(1 << vParams.vatSz)(false.B))
  val vat_tail = RegInit(0.U(vParams.vatSz.W))
  val vat_head = RegInit(0.U(vParams.vatSz.W))
  def vatOlder(i0: UInt, i1: UInt) = cqOlder(i0, i1, vat_tail)
  val vat_available = !vat_valids(vat_tail)
  val vat_available_count = PopCount(~vat_valids.asUInt)

  val vat_head_incr = WireInit(false.B)
  when (vat_head_incr) { vat_head := vat_head + 1.U }

  when (io.dis.fire) {
    assert(!vat_valids(vat_tail))
    vat_valids(vat_tail) := true.B
    vat_tail := vat_tail + 1.U
    debug_id_ctr := debug_id_ctr + 1.U
  }

  when (vat_tail =/= vat_head && !vat_valids(vat_head)) {
    vat_head_incr := true.B
  }

  val issue_inst = WireInit(io.issue.bits)
  issue_inst.vat := vat_tail
  issue_inst.debug_id := debug_id_ctr

  val hwacha_limiter = vParams.hwachaLimiter.map(n => Module(new HwachaLimiter(n)))
  hwacha_limiter.foreach { h =>
    h.io.inst := issue_inst
    h.io.fire := io.issue.fire
    h.io.vat_release.foreach(_ := false.B)
  }
  val hwacha_block = hwacha_limiter.map(_.io.block).getOrElse(false.B)

  io.issue.ready := vat_available && io.dis.ready && (!issue_inst.vmu || io.mem.ready) && !hwacha_block
  io.dis.valid := vat_available && io.issue.valid && (!issue_inst.vmu || io.mem.ready) && !hwacha_block
  io.mem.valid := vat_available && io.issue.valid && io.dis.ready && issue_inst.vmu && !hwacha_block

  io.vat_tail := vat_tail
  io.vat_head := vat_head

  when ((io.issue.bits.funct3 === OPMVV && io.issue.bits.opmf6 === OPMFunct6.wrxunary0 && io.issue.bits.rs1 === 0.U) ||
        (io.issue.bits.funct3 === OPFVV && io.issue.bits.opff6 === OPFFunct6.wrfunary0 && io.issue.bits.rs1 === 0.U)) {
    issue_inst.vconfig.vl := 1.U
    issue_inst.vstart := 0.U
  }

  // Strided with stride = 1 << eew is just unit-strided
  when (io.issue.bits.mop === mopStrided && io.issue.bits.rs2_data === ((io.issue.bits.nf +& 1.U) << io.issue.bits.mem_elem_size)) {
    issue_inst.mop := mopUnit
  }

  io.scalar_resp.valid := false.B
  io.scalar_resp.bits.fp := false.B
  io.scalar_resp.bits.rd := io.issue.bits.rd
  io.scalar_resp.bits.size := 3.U
  io.scalar_resp.bits.data := Mux(io.issue.bits.rs1(0),
    ~(0.U(xLen.W)), // vfirst
    0.U // vpopc
  )

  when (issue_inst.vconfig.vl <= issue_inst.vstart && !(issue_inst.funct3 === OPIVI && issue_inst.opif6 === OPIFunct6.mvnrr)) {
    io.issue.ready := true.B
    io.mem.valid := false.B
    io.dis.valid := false.B
    when (io.issue.bits.funct3 === OPMVV && io.issue.bits.opmf6 === OPMFunct6.wrxunary0) {
      io.issue.ready := io.scalar_resp.ready
      io.scalar_resp.valid := io.issue.valid
    }
  }

  io.dis.bits := issue_inst

  io.mem.bits.base_offset := issue_inst.rs1_data
  io.mem.bits.stride := issue_inst.rs2_data
  io.mem.bits.page := issue_inst.page
  io.mem.bits.vstart := issue_inst.vstart
  io.mem.bits.segstart := issue_inst.segstart
  io.mem.bits.segend := issue_inst.segend
  io.mem.bits.vl := issue_inst.vconfig.vl
  io.mem.bits.mop := issue_inst.mop
  io.mem.bits.vm := issue_inst.vm
  io.mem.bits.nf := issue_inst.nf
  io.mem.bits.idx_size := issue_inst.mem_idx_size
  io.mem.bits.elem_size := issue_inst.mem_elem_size
  io.mem.bits.whole_reg := issue_inst.umop === lumopWhole && issue_inst.orig_mop === mopUnit
  io.mem.bits.store := issue_inst.bits(5)
  io.mem.bits.fast_sg := issue_inst.fast_sg
  io.mem.bits.debug_id := issue_inst.debug_id

  for (r <- io.vat_release) {
    when (r.valid) {
      assert(vat_valids(r.bits))
      when (r.bits === vat_head) { vat_head_incr := true.B }
      vat_valids(r.bits) := false.B
      hwacha_limiter.foreach(_.io.vat_release(r.bits) := true.B)
    }
  }
}
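The dispatcher's vatOlder helper defers to cqOlder from saturn.common, which is not included above. As an assumption for illustration, a minimal wrap-around age comparison for such a circular allocator can be written as follows (the real saturn definition may differ):

// Hypothetical sketch: true when entry i0 was allocated before i1, given the current
// tail pointer of the circular queue. The double XOR corrects the plain comparison
// exactly when one of the two indices has wrapped past the tail.
def cqOlder(i0: UInt, i1: UInt, tail: UInt): Bool =
  (i0 < i1) ^ (i0 < tail) ^ (i1 < tail)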
module VectorDispatcher( // @[Dispatch.scala:13:7] input clock, // @[Dispatch.scala:13:7] input reset, // @[Dispatch.scala:13:7] output io_issue_ready, // @[Dispatch.scala:14:14] input io_issue_valid, // @[Dispatch.scala:14:14] input [31:0] io_issue_bits_bits, // @[Dispatch.scala:14:14] input [8:0] io_issue_bits_vconfig_vl, // @[Dispatch.scala:14:14] input [2:0] io_issue_bits_vconfig_vtype_vsew, // @[Dispatch.scala:14:14] input io_issue_bits_vconfig_vtype_vlmul_sign, // @[Dispatch.scala:14:14] input [1:0] io_issue_bits_vconfig_vtype_vlmul_mag, // @[Dispatch.scala:14:14] input [7:0] io_issue_bits_vstart, // @[Dispatch.scala:14:14] input [2:0] io_issue_bits_segstart, // @[Dispatch.scala:14:14] input [2:0] io_issue_bits_segend, // @[Dispatch.scala:14:14] input [63:0] io_issue_bits_rs1_data, // @[Dispatch.scala:14:14] input [63:0] io_issue_bits_rs2_data, // @[Dispatch.scala:14:14] input [19:0] io_issue_bits_page, // @[Dispatch.scala:14:14] input [2:0] io_issue_bits_rm, // @[Dispatch.scala:14:14] input [1:0] io_issue_bits_emul, // @[Dispatch.scala:14:14] input [1:0] io_issue_bits_mop, // @[Dispatch.scala:14:14] input io_mem_ready, // @[Dispatch.scala:14:14] output io_mem_valid, // @[Dispatch.scala:14:14] output [15:0] io_mem_bits_debug_id, // @[Dispatch.scala:14:14] output [11:0] io_mem_bits_base_offset, // @[Dispatch.scala:14:14] output [19:0] io_mem_bits_page, // @[Dispatch.scala:14:14] output [11:0] io_mem_bits_stride, // @[Dispatch.scala:14:14] output [2:0] io_mem_bits_segstart, // @[Dispatch.scala:14:14] output [2:0] io_mem_bits_segend, // @[Dispatch.scala:14:14] output [7:0] io_mem_bits_vstart, // @[Dispatch.scala:14:14] output [8:0] io_mem_bits_vl, // @[Dispatch.scala:14:14] output [1:0] io_mem_bits_mop, // @[Dispatch.scala:14:14] output io_mem_bits_vm, // @[Dispatch.scala:14:14] output [2:0] io_mem_bits_nf, // @[Dispatch.scala:14:14] output [1:0] io_mem_bits_idx_size, // @[Dispatch.scala:14:14] output [1:0] io_mem_bits_elem_size, // @[Dispatch.scala:14:14] output io_mem_bits_whole_reg, // @[Dispatch.scala:14:14] output io_mem_bits_store, // @[Dispatch.scala:14:14] input io_dis_ready, // @[Dispatch.scala:14:14] output io_dis_valid, // @[Dispatch.scala:14:14] output [31:0] io_dis_bits_bits, // @[Dispatch.scala:14:14] output [8:0] io_dis_bits_vconfig_vl, // @[Dispatch.scala:14:14] output [2:0] io_dis_bits_vconfig_vtype_vsew, // @[Dispatch.scala:14:14] output io_dis_bits_vconfig_vtype_vlmul_sign, // @[Dispatch.scala:14:14] output [1:0] io_dis_bits_vconfig_vtype_vlmul_mag, // @[Dispatch.scala:14:14] output [7:0] io_dis_bits_vstart, // @[Dispatch.scala:14:14] output [2:0] io_dis_bits_segstart, // @[Dispatch.scala:14:14] output [2:0] io_dis_bits_segend, // @[Dispatch.scala:14:14] output [63:0] io_dis_bits_rs1_data, // @[Dispatch.scala:14:14] output [4:0] io_dis_bits_vat, // @[Dispatch.scala:14:14] output [2:0] io_dis_bits_rm, // @[Dispatch.scala:14:14] output [1:0] io_dis_bits_emul, // @[Dispatch.scala:14:14] output [15:0] io_dis_bits_debug_id, // @[Dispatch.scala:14:14] output [1:0] io_dis_bits_mop, // @[Dispatch.scala:14:14] input io_scalar_resp_ready, // @[Dispatch.scala:14:14] output io_scalar_resp_valid, // @[Dispatch.scala:14:14] output [63:0] io_scalar_resp_bits_data, // @[Dispatch.scala:14:14] output [4:0] io_scalar_resp_bits_rd, // @[Dispatch.scala:14:14] input io_vat_release_0_valid, // @[Dispatch.scala:14:14] input [4:0] io_vat_release_0_bits, // @[Dispatch.scala:14:14] input io_vat_release_1_valid, // @[Dispatch.scala:14:14] input [4:0] io_vat_release_1_bits, // 
@[Dispatch.scala:14:14] input io_vat_release_2_valid, // @[Dispatch.scala:14:14] input [4:0] io_vat_release_2_bits, // @[Dispatch.scala:14:14] output [4:0] io_vat_head, // @[Dispatch.scala:14:14] output [4:0] io_vat_tail // @[Dispatch.scala:14:14] ); wire io_dis_valid_0; // @[Dispatch.scala:65:{18,53}, :91:127, :94:18] reg [15:0] debug_id_ctr; // @[Dispatch.scala:27:29] reg vat_valids_0; // @[Dispatch.scala:28:27] reg vat_valids_1; // @[Dispatch.scala:28:27] reg vat_valids_2; // @[Dispatch.scala:28:27] reg vat_valids_3; // @[Dispatch.scala:28:27] reg vat_valids_4; // @[Dispatch.scala:28:27] reg vat_valids_5; // @[Dispatch.scala:28:27] reg vat_valids_6; // @[Dispatch.scala:28:27] reg vat_valids_7; // @[Dispatch.scala:28:27] reg vat_valids_8; // @[Dispatch.scala:28:27] reg vat_valids_9; // @[Dispatch.scala:28:27] reg vat_valids_10; // @[Dispatch.scala:28:27] reg vat_valids_11; // @[Dispatch.scala:28:27] reg vat_valids_12; // @[Dispatch.scala:28:27] reg vat_valids_13; // @[Dispatch.scala:28:27] reg vat_valids_14; // @[Dispatch.scala:28:27] reg vat_valids_15; // @[Dispatch.scala:28:27] reg vat_valids_16; // @[Dispatch.scala:28:27] reg vat_valids_17; // @[Dispatch.scala:28:27] reg vat_valids_18; // @[Dispatch.scala:28:27] reg vat_valids_19; // @[Dispatch.scala:28:27] reg vat_valids_20; // @[Dispatch.scala:28:27] reg vat_valids_21; // @[Dispatch.scala:28:27] reg vat_valids_22; // @[Dispatch.scala:28:27] reg vat_valids_23; // @[Dispatch.scala:28:27] reg vat_valids_24; // @[Dispatch.scala:28:27] reg vat_valids_25; // @[Dispatch.scala:28:27] reg vat_valids_26; // @[Dispatch.scala:28:27] reg vat_valids_27; // @[Dispatch.scala:28:27] reg vat_valids_28; // @[Dispatch.scala:28:27] reg vat_valids_29; // @[Dispatch.scala:28:27] reg vat_valids_30; // @[Dispatch.scala:28:27] reg vat_valids_31; // @[Dispatch.scala:28:27] reg [4:0] vat_tail; // @[Dispatch.scala:29:25] reg [4:0] vat_head; // @[Dispatch.scala:30:25] wire [31:0] _GEN = {{vat_valids_31}, {vat_valids_30}, {vat_valids_29}, {vat_valids_28}, {vat_valids_27}, {vat_valids_26}, {vat_valids_25}, {vat_valids_24}, {vat_valids_23}, {vat_valids_22}, {vat_valids_21}, {vat_valids_20}, {vat_valids_19}, {vat_valids_18}, {vat_valids_17}, {vat_valids_16}, {vat_valids_15}, {vat_valids_14}, {vat_valids_13}, {vat_valids_12}, {vat_valids_11}, {vat_valids_10}, {vat_valids_9}, {vat_valids_8}, {vat_valids_7}, {vat_valids_6}, {vat_valids_5}, {vat_valids_4}, {vat_valids_3}, {vat_valids_2}, {vat_valids_1}, {vat_valids_0}}; // @[Dispatch.scala:28:27, :32:23] wire _GEN_0 = _GEN[vat_tail]; // @[Dispatch.scala:29:25, :32:23] wire _GEN_1 = io_dis_ready & io_dis_valid_0; // @[Decoupled.scala:51:35] wire _io_mem_valid_T = ~_GEN_0 & io_issue_valid; // @[Dispatch.scala:32:23, :65:35] wire _GEN_2 = io_issue_bits_bits[14:12] == 3'h2; // @[Dispatch.scala:71:31] wire [6:0] _GEN_3 = {1'h0, io_issue_bits_bits[31:26]}; // @[Dispatch.scala:13:7] wire _GEN_4 = (_GEN_2 | io_issue_bits_bits[14:12] == 3'h6 ? _GEN_3 : 7'h40) == 7'h10; // @[Dispatch.scala:71:{31,64}] wire _GEN_5 = io_issue_bits_bits[19:15] == 5'h0; // @[Dispatch.scala:71:109] wire _GEN_6 = io_issue_bits_bits[14:12] == 3'h1; // @[Dispatch.scala:72:31] wire _GEN_7 = _GEN_2 & _GEN_4 & _GEN_5 | _GEN_6 & (_GEN_6 | io_issue_bits_bits[14:12] == 3'h5 ? _GEN_3 : 7'h40) == 7'h10 & _GEN_5; // @[Dispatch.scala:71:{31,41,64,88,109,118}, :72:{31,41,64,88}] wire [8:0] issue_inst_vconfig_vl = _GEN_7 ? 9'h1 : io_issue_bits_vconfig_vl; // @[Dispatch.scala:51:28, :71:118, :72:119, :73:27] wire [7:0] issue_inst_vstart = _GEN_7 ? 
8'h0 : io_issue_bits_vstart; // @[Dispatch.scala:51:28, :71:118, :72:119, :74:23] wire [1:0] issue_inst_mop = io_issue_bits_mop == 2'h2 & io_issue_bits_rs2_data == {53'h0, {7'h0, {1'h0, io_issue_bits_bits[31:29]} + 4'h1} << (io_issue_bits_mop[0] ? io_issue_bits_vconfig_vtype_vsew : {1'h0, io_issue_bits_bits[13:12]})} ? 2'h0 : io_issue_bits_mop; // @[Dispatch.scala:13:7, :51:28, :78:{27,42,68,91,99,132}, :79:20] wire _GEN_8 = io_issue_bits_bits[14:12] == 3'h3; // @[Dispatch.scala:91:75] wire _GEN_9 = issue_inst_vconfig_vl <= {1'h0, issue_inst_vstart} & ~(_GEN_8 & (io_issue_bits_bits[14:12] == 3'h0 | _GEN_8 | io_issue_bits_bits[14:12] == 3'h4 ? _GEN_3 : 7'h40) == 7'h27); // @[Dispatch.scala:13:7, :51:28, :72:119, :73:27, :74:23, :78:91, :91:{31,52,55,75,85,105}] assign io_dis_valid_0 = ~_GEN_9 & _io_mem_valid_T & (~(io_issue_bits_bits[6:0] == 7'h7 | io_issue_bits_bits[6:0] == 7'h27) | io_mem_ready); // @[Dispatch.scala:65:{18,35,53,57,73}, :66:18, :91:{52,127}, :93:18, :94:18] wire _GEN_10 = _GEN_2 & _GEN_4; // @[Dispatch.scala:71:{31,64}, :95:42] wire _GEN_11 = io_vat_release_0_valid & ~reset; // @[Dispatch.scala:41:11, :123:13] wire _GEN_12 = io_vat_release_1_valid & ~reset; // @[Dispatch.scala:41:11, :123:13] wire _GEN_13 = io_vat_release_2_valid & ~reset; // @[Dispatch.scala:41:11, :123:13]
Generate the Verilog code corresponding to the following Chisel files. File Transposer.scala: package gemmini import chisel3._ import chisel3.util._ import Util._ trait Transposer[T <: Data] extends Module { def dim: Int def dataType: T val io = IO(new Bundle { val inRow = Flipped(Decoupled(Vec(dim, dataType))) val outCol = Decoupled(Vec(dim, dataType)) }) } class PipelinedTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] { require(isPow2(dim)) val regArray = Seq.fill(dim, dim)(Reg(dataType)) val regArrayT = regArray.transpose val sMoveUp :: sMoveLeft :: Nil = Enum(2) val state = RegInit(sMoveUp) val leftCounter = RegInit(0.U(log2Ceil(dim+1).W)) //(io.inRow.fire && state === sMoveLeft, dim+1) val upCounter = RegInit(0.U(log2Ceil(dim+1).W)) //Counter(io.inRow.fire && state === sMoveUp, dim+1) io.outCol.valid := 0.U io.inRow.ready := 0.U switch(state) { is(sMoveUp) { io.inRow.ready := upCounter <= dim.U io.outCol.valid := leftCounter > 0.U when(io.inRow.fire) { upCounter := upCounter + 1.U } when(upCounter === (dim-1).U) { state := sMoveLeft leftCounter := 0.U } when(io.outCol.fire) { leftCounter := leftCounter - 1.U } } is(sMoveLeft) { io.inRow.ready := leftCounter <= dim.U // TODO: this is naive io.outCol.valid := upCounter > 0.U when(leftCounter === (dim-1).U) { state := sMoveUp } when(io.inRow.fire) { leftCounter := leftCounter + 1.U upCounter := 0.U } when(io.outCol.fire) { upCounter := upCounter - 1.U } } } // Propagate input from bottom row to top row systolically in the move up phase // TODO: need to iterate over columns to connect Chisel values of type T // Should be able to operate directly on the Vec, but Seq and Vec don't mix (try Array?) for (colIdx <- 0 until dim) { regArray.foldRight(io.inRow.bits(colIdx)) { case (regRow, prevReg) => when (state === sMoveUp) { regRow(colIdx) := prevReg } regRow(colIdx) } } // Propagate input from right side to left side systolically in the move left phase for (rowIdx <- 0 until dim) { regArrayT.foldRight(io.inRow.bits(rowIdx)) { case (regCol, prevReg) => when (state === sMoveLeft) { regCol(rowIdx) := prevReg } regCol(rowIdx) } } // Pull from the left side or the top side based on the state for (idx <- 0 until dim) { when (state === sMoveUp) { io.outCol.bits(idx) := regArray(0)(idx) }.elsewhen(state === sMoveLeft) { io.outCol.bits(idx) := regArrayT(0)(idx) }.otherwise { io.outCol.bits(idx) := DontCare } } } class AlwaysOutTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] { require(isPow2(dim)) val LEFT_DIR = 0.U(1.W) val UP_DIR = 1.U(1.W) class PE extends Module { val io = IO(new Bundle { val inR = Input(dataType) val inD = Input(dataType) val outL = Output(dataType) val outU = Output(dataType) val dir = Input(UInt(1.W)) val en = Input(Bool()) }) val reg = RegEnable(Mux(io.dir === LEFT_DIR, io.inR, io.inD), io.en) io.outU := reg io.outL := reg } val pes = Seq.fill(dim,dim)(Module(new PE)) val counter = RegInit(0.U((log2Ceil(dim) max 1).W)) // TODO replace this with a standard Chisel counter val dir = RegInit(LEFT_DIR) // Wire up horizontal signals for (row <- 0 until dim; col <- 0 until dim) { val right_in = if (col == dim-1) io.inRow.bits(row) else pes(row)(col+1).io.outL pes(row)(col).io.inR := right_in } // Wire up vertical signals for (row <- 0 until dim; col <- 0 until dim) { val down_in = if (row == dim-1) io.inRow.bits(col) else pes(row+1)(col).io.outU pes(row)(col).io.inD := down_in } // Wire up global signals pes.flatten.foreach(_.io.dir := dir) pes.flatten.foreach(_.io.en := 
io.inRow.fire) io.outCol.valid := true.B io.inRow.ready := true.B val left_out = VecInit(pes.transpose.head.map(_.io.outL)) val up_out = VecInit(pes.head.map(_.io.outU)) io.outCol.bits := Mux(dir === LEFT_DIR, left_out, up_out) when (io.inRow.fire) { counter := wrappingAdd(counter, 1.U, dim) } when (counter === (dim-1).U && io.inRow.fire) { dir := ~dir } } class NaiveTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] { val regArray = Seq.fill(dim, dim)(Reg(dataType)) val regArrayT = regArray.transpose // state = 0 => filling regArray row-wise, state = 1 => draining regArray column-wise val state = RegInit(0.U(1.W)) val countInc = io.inRow.fire || io.outCol.fire val (countValue, countWrap) = Counter(countInc, dim) io.inRow.ready := state === 0.U io.outCol.valid := state === 1.U for (i <- 0 until dim) { for (j <- 0 until dim) { when(countValue === i.U && io.inRow.fire) { regArray(i)(j) := io.inRow.bits(j) } } } for (i <- 0 until dim) { io.outCol.bits(i) := 0.U for (j <- 0 until dim) { when(countValue === j.U) { io.outCol.bits(i) := regArrayT(j)(i) } } } when (io.inRow.fire && countWrap) { state := 1.U } when (io.outCol.fire && countWrap) { state := 0.U } assert(!(state === 0.U) || !io.outCol.fire) assert(!(state === 1.U) || !io.inRow.fire) }
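Before the generated PE module below, here is a minimal usage sketch of the transposers defined above. The wrapper module name and its parameters (dim = 4, UInt(8.W) elements) are arbitrary illustration choices, not part of the gemmini sources; it simply assumes the gemmini package above is on the classpath. Note that AlwaysOutTransposer always asserts inRow.ready and outCol.valid, so a real user must pace rows itself and consume a column every beat.
import chisel3._
import chisel3.util._
import gemmini.AlwaysOutTransposer
// Hypothetical wrapper, for illustration only: streams 4-wide rows of bytes through
// an AlwaysOutTransposer and exposes the transposed columns.
class TransposeExample extends Module {
  val io = IO(new Bundle {
    val in  = Flipped(Decoupled(Vec(4, UInt(8.W)))) // one row per beat
    val out = Decoupled(Vec(4, UInt(8.W)))          // one column per beat
  })
  val t = Module(new AlwaysOutTransposer(4, UInt(8.W)))
  t.io.inRow <> io.in   // rows enter along one edge of the PE grid
  io.out <> t.io.outCol // columns drain from the other edge, dim beats later
}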
module PE_73( // @[Transposer.scala:100:9] input clock, // @[Transposer.scala:100:9] input reset, // @[Transposer.scala:100:9] input [7:0] io_inR, // @[Transposer.scala:101:16] input [7:0] io_inD, // @[Transposer.scala:101:16] output [7:0] io_outL, // @[Transposer.scala:101:16] output [7:0] io_outU, // @[Transposer.scala:101:16] input io_dir, // @[Transposer.scala:101:16] input io_en // @[Transposer.scala:101:16] ); wire [7:0] io_inR_0 = io_inR; // @[Transposer.scala:100:9] wire [7:0] io_inD_0 = io_inD; // @[Transposer.scala:100:9] wire io_dir_0 = io_dir; // @[Transposer.scala:100:9] wire io_en_0 = io_en; // @[Transposer.scala:100:9] wire [7:0] io_outL_0; // @[Transposer.scala:100:9] wire [7:0] io_outU_0; // @[Transposer.scala:100:9] wire _reg_T = ~io_dir_0; // @[Transposer.scala:100:9, :110:36] wire [7:0] _reg_T_1 = _reg_T ? io_inR_0 : io_inD_0; // @[Transposer.scala:100:9, :110:{28,36}] reg [7:0] reg_0; // @[Transposer.scala:110:24] assign io_outL_0 = reg_0; // @[Transposer.scala:100:9, :110:24] assign io_outU_0 = reg_0; // @[Transposer.scala:100:9, :110:24] always @(posedge clock) begin // @[Transposer.scala:100:9] if (io_en_0) // @[Transposer.scala:100:9] reg_0 <= _reg_T_1; // @[Transposer.scala:110:{24,28}] end // @[Transposer.scala:100:9] assign io_outL = io_outL_0; // @[Transposer.scala:100:9] assign io_outU = io_outU_0; // @[Transposer.scala:100:9] endmodule
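One detail worth noting in the PE_73 output above: reg_0 is declared without any reset branch because the PE uses the enable-only RegEnable overload. As a hedged aside (an assumption about what one might want, not what the gemmini PE does), giving the flop a defined power-on value only requires the init overload, which makes firtool add a reset branch (synchronous or asynchronous, depending on the module's reset type) to the same always block:
// Sketch only: a PE register with a defined reset value, not the gemmini PE above.
val reg = RegEnable(Mux(io.dir === LEFT_DIR, io.inR, io.inD), 0.U.asTypeOf(dataType), io.en)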
Generate the Verilog code corresponding to the following Chisel files. File SourceB.scala: /* * Copyright 2019 SiFive, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You should have received a copy of LICENSE.Apache2 along with * this software. If not, you may obtain a copy at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package sifive.blocks.inclusivecache import chisel3._ import chisel3.util._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.util._ class SourceBRequest(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params) { val param = UInt(3.W) val tag = UInt(params.tagBits.W) val set = UInt(params.setBits.W) val clients = UInt(params.clientBits.W) } class SourceB(params: InclusiveCacheParameters) extends Module { val io = IO(new Bundle { val req = Flipped(Decoupled(new SourceBRequest(params))) val b = Decoupled(new TLBundleB(params.inner.bundle)) }) if (params.firstLevel) { // Tie off unused ports io.req.ready := true.B io.b.valid := false.B io.b.bits := DontCare } else { val remain = RegInit(0.U(params.clientBits.W)) val remain_set = WireInit(init = 0.U(params.clientBits.W)) val remain_clr = WireInit(init = 0.U(params.clientBits.W)) remain := (remain | remain_set) & ~remain_clr val busy = remain.orR val todo = Mux(busy, remain, io.req.bits.clients) val next = ~(leftOR(todo) << 1) & todo if (params.clientBits > 1) { params.ccover(PopCount(remain) > 1.U, "SOURCEB_MULTI_PROBE", "Had to probe more than one client") } assert (!io.req.valid || io.req.bits.clients =/= 0.U) io.req.ready := !busy when (io.req.fire) { remain_set := io.req.bits.clients } // No restrictions on the type of buffer used here val b = Wire(chiselTypeOf(io.b)) io.b <> params.micro.innerBuf.b(b) b.valid := busy || io.req.valid when (b.fire) { remain_clr := next } params.ccover(b.valid && !b.ready, "SOURCEB_STALL", "Backpressured when issuing a probe") val tag = Mux(!busy, io.req.bits.tag, RegEnable(io.req.bits.tag, io.req.fire)) val set = Mux(!busy, io.req.bits.set, RegEnable(io.req.bits.set, io.req.fire)) val param = Mux(!busy, io.req.bits.param, RegEnable(io.req.bits.param, io.req.fire)) b.bits.opcode := TLMessages.Probe b.bits.param := param b.bits.size := params.offsetBits .U b.bits.source := params.clientSource(next) b.bits.address := params.expandAddress(tag, set, 0.U) b.bits.mask := ~0.U(params.inner.manager.beatBytes.W) b.bits.data := 0.U b.bits.corrupt := false.B } } File Parameters.scala: /* * Copyright 2019 SiFive, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You should have received a copy of LICENSE.Apache2 along with * this software. If not, you may obtain a copy at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package sifive.blocks.inclusivecache import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.util._ import freechips.rocketchip.util.property.cover import scala.math.{min,max} case class CacheParameters( level: Int, ways: Int, sets: Int, blockBytes: Int, beatBytes: Int, // inner hintsSkipProbe: Boolean) { require (ways > 0) require (sets > 0) require (blockBytes > 0 && isPow2(blockBytes)) require (beatBytes > 0 && isPow2(beatBytes)) require (blockBytes >= beatBytes) val blocks = ways * sets val sizeBytes = blocks * blockBytes val blockBeats = blockBytes/beatBytes } case class InclusiveCachePortParameters( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams) { def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new TLBuffer(a, b, c, d, e)) } object InclusiveCachePortParameters { val none = InclusiveCachePortParameters( a = BufferParams.none, b = BufferParams.none, c = BufferParams.none, d = BufferParams.none, e = BufferParams.none) val full = InclusiveCachePortParameters( a = BufferParams.default, b = BufferParams.default, c = BufferParams.default, d = BufferParams.default, e = BufferParams.default) // This removes feed-through paths from C=>A and A=>C val fullC = InclusiveCachePortParameters( a = BufferParams.none, b = BufferParams.none, c = BufferParams.default, d = BufferParams.none, e = BufferParams.none) val flowAD = InclusiveCachePortParameters( a = BufferParams.flow, b = BufferParams.none, c = BufferParams.none, d = BufferParams.flow, e = BufferParams.none) val flowAE = InclusiveCachePortParameters( a = BufferParams.flow, b = BufferParams.none, c = BufferParams.none, d = BufferParams.none, e = BufferParams.flow) // For innerBuf: // SinkA: no restrictions, flows into scheduler+putbuffer // SourceB: no restrictions, flows out of scheduler // sinkC: no restrictions, flows into scheduler+putbuffer & buffered to bankedStore // SourceD: no restrictions, flows out of bankedStore/regout // SinkE: no restrictions, flows into scheduler // // ... so while none is possible, you probably want at least flowAC to cut ready // from the scheduler delay and flowD to ease SourceD back-pressure // For outerBufer: // SourceA: must not be pipe, flows out of scheduler // SinkB: no restrictions, flows into scheduler // SourceC: pipe is useless, flows out of bankedStore/regout, parameter depth ignored // SinkD: no restrictions, flows into scheduler & bankedStore // SourceE: must not be pipe, flows out of scheduler // // ... 
AE take the channel ready into the scheduler, so you need at least flowAE } case class InclusiveCacheMicroParameters( writeBytes: Int, // backing store update granularity memCycles: Int = 40, // # of L2 clock cycles for a memory round-trip (50ns @ 800MHz) portFactor: Int = 4, // numSubBanks = (widest TL port * portFactor) / writeBytes dirReg: Boolean = false, innerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.fullC, // or none outerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.full) // or flowAE { require (writeBytes > 0 && isPow2(writeBytes)) require (memCycles > 0) require (portFactor >= 2) // for inner RMW and concurrent outer Relase + Grant } case class InclusiveCacheControlParameters( address: BigInt, beatBytes: Int, bankedControl: Boolean) case class InclusiveCacheParameters( cache: CacheParameters, micro: InclusiveCacheMicroParameters, control: Boolean, inner: TLEdgeIn, outer: TLEdgeOut)(implicit val p: Parameters) { require (cache.ways > 1) require (cache.sets > 1 && isPow2(cache.sets)) require (micro.writeBytes <= inner.manager.beatBytes) require (micro.writeBytes <= outer.manager.beatBytes) require (inner.manager.beatBytes <= cache.blockBytes) require (outer.manager.beatBytes <= cache.blockBytes) // Require that all cached address ranges have contiguous blocks outer.manager.managers.flatMap(_.address).foreach { a => require (a.alignment >= cache.blockBytes) } // If we are the first level cache, we do not need to support inner-BCE val firstLevel = !inner.client.clients.exists(_.supports.probe) // If we are the last level cache, we do not need to support outer-B val lastLevel = !outer.manager.managers.exists(_.regionType > RegionType.UNCACHED) require (lastLevel) // Provision enough resources to achieve full throughput with missing single-beat accesses val mshrs = InclusiveCacheParameters.all_mshrs(cache, micro) val secondary = max(mshrs, micro.memCycles - mshrs) val putLists = micro.memCycles // allow every request to be single beat val putBeats = max(2*cache.blockBeats, micro.memCycles) val relLists = 2 val relBeats = relLists*cache.blockBeats val flatAddresses = AddressSet.unify(outer.manager.managers.flatMap(_.address)) val pickMask = AddressDecoder(flatAddresses.map(Seq(_)), flatAddresses.map(_.mask).reduce(_|_)) def bitOffsets(x: BigInt, offset: Int = 0, tail: List[Int] = List.empty[Int]): List[Int] = if (x == 0) tail.reverse else bitOffsets(x >> 1, offset + 1, if ((x & 1) == 1) offset :: tail else tail) val addressMapping = bitOffsets(pickMask) val addressBits = addressMapping.size // println(s"addresses: ${flatAddresses} => ${pickMask} => ${addressBits}") val allClients = inner.client.clients.size val clientBitsRaw = inner.client.clients.filter(_.supports.probe).size val clientBits = max(1, clientBitsRaw) val stateBits = 2 val wayBits = log2Ceil(cache.ways) val setBits = log2Ceil(cache.sets) val offsetBits = log2Ceil(cache.blockBytes) val tagBits = addressBits - setBits - offsetBits val putBits = log2Ceil(max(putLists, relLists)) require (tagBits > 0) require (offsetBits > 0) val innerBeatBits = (offsetBits - log2Ceil(inner.manager.beatBytes)) max 1 val outerBeatBits = (offsetBits - log2Ceil(outer.manager.beatBytes)) max 1 val innerMaskBits = inner.manager.beatBytes / micro.writeBytes val outerMaskBits = outer.manager.beatBytes / micro.writeBytes def clientBit(source: UInt): UInt = { if (clientBitsRaw == 0) { 0.U } else { Cat(inner.client.clients.filter(_.supports.probe).map(_.sourceId.contains(source)).reverse) } } def 
clientSource(bit: UInt): UInt = { if (clientBitsRaw == 0) { 0.U } else { Mux1H(bit, inner.client.clients.filter(_.supports.probe).map(c => c.sourceId.start.U)) } } def parseAddress(x: UInt): (UInt, UInt, UInt) = { val offset = Cat(addressMapping.map(o => x(o,o)).reverse) val set = offset >> offsetBits val tag = set >> setBits (tag(tagBits-1, 0), set(setBits-1, 0), offset(offsetBits-1, 0)) } def widen(x: UInt, width: Int): UInt = { val y = x | 0.U(width.W) assert (y >> width === 0.U) y(width-1, 0) } def expandAddress(tag: UInt, set: UInt, offset: UInt): UInt = { val base = Cat(widen(tag, tagBits), widen(set, setBits), widen(offset, offsetBits)) val bits = Array.fill(outer.bundle.addressBits) { 0.U(1.W) } addressMapping.zipWithIndex.foreach { case (a, i) => bits(a) = base(i,i) } Cat(bits.reverse) } def restoreAddress(expanded: UInt): UInt = { val missingBits = flatAddresses .map { a => (a.widen(pickMask).base, a.widen(~pickMask)) } // key is the bits to restore on match .groupBy(_._1) .view .mapValues(_.map(_._2)) val muxMask = AddressDecoder(missingBits.values.toList) val mux = missingBits.toList.map { case (bits, addrs) => val widen = addrs.map(_.widen(~muxMask)) val matches = AddressSet .unify(widen.distinct) .map(_.contains(expanded)) .reduce(_ || _) (matches, bits.U) } expanded | Mux1H(mux) } def dirReg[T <: Data](x: T, en: Bool = true.B): T = { if (micro.dirReg) RegEnable(x, en) else x } def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) = cover(cond, "CCACHE_L" + cache.level + "_" + label, "MemorySystem;;" + desc) } object MetaData { val stateBits = 2 def INVALID: UInt = 0.U(stateBits.W) // way is empty def BRANCH: UInt = 1.U(stateBits.W) // outer slave cache is trunk def TRUNK: UInt = 2.U(stateBits.W) // unique inner master cache is trunk def TIP: UInt = 3.U(stateBits.W) // we are trunk, inner masters are branch // Does a request need trunk? def needT(opcode: UInt, param: UInt): Bool = { !opcode(2) || (opcode === TLMessages.Hint && param === TLHints.PREFETCH_WRITE) || ((opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm) && param =/= TLPermissions.NtoB) } // Does a request prove the client need not be probed? 
def skipProbeN(opcode: UInt, hintsSkipProbe: Boolean): Bool = { // Acquire(toB) and Get => is N, so no probe // Acquire(*toT) => is N or B, but need T, so no probe // Hint => could be anything, so probe IS needed, if hintsSkipProbe is enabled, skip probe the same client // Put* => is N or B, so probe IS needed opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm || opcode === TLMessages.Get || (opcode === TLMessages.Hint && hintsSkipProbe.B) } def isToN(param: UInt): Bool = { param === TLPermissions.TtoN || param === TLPermissions.BtoN || param === TLPermissions.NtoN } def isToB(param: UInt): Bool = { param === TLPermissions.TtoB || param === TLPermissions.BtoB } } object InclusiveCacheParameters { val lfsrBits = 10 val L2ControlAddress = 0x2010000 val L2ControlSize = 0x1000 def out_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int = { // We need 2-3 normal MSHRs to cover the Directory latency // To fully exploit memory bandwidth-delay-product, we need memCyles/blockBeats MSHRs max(if (micro.dirReg) 3 else 2, (micro.memCycles + cache.blockBeats - 1) / cache.blockBeats) } def all_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int = // We need a dedicated MSHR for B+C each 2 + out_mshrs(cache, micro) } class InclusiveCacheBundle(params: InclusiveCacheParameters) extends Bundle
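Before the generated SourceB module below, it may help to see what the one-liner val next = ~(leftOR(todo) << 1) & todo in SourceB.scala computes: leftOR propagates every set bit toward the MSB, so the whole expression isolates the lowest set bit of todo, i.e. exactly one pending client per B beat. A software-only sketch (plain Scala; this leftOR merely mimics freechips.rocketchip.util.leftOR at a fixed width, and the 4-bit example value is made up):
// Bit i of leftOR(x) is set iff any of bits x[i:0] is set.
def leftOR(x: Int, width: Int): Int =
  (0 until width).map(i => if ((x & ((1 << (i + 1)) - 1)) != 0) 1 << i else 0).reduce(_ | _)
// next = ~(leftOR(todo) << 1) & todo, masked to 'width' bits.
def next(todo: Int, width: Int): Int =
  ~(leftOR(todo, width) << 1) & ((1 << width) - 1) & todo
// Example: todo = 0x6 (0b0110) -> leftOR = 0b1110 -> next = 0b0010,
// the lowest set client bit, i.e. the same value as todo & -todo.
assert(next(0x6, 4) == 0x2)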
module SourceB( // @[SourceB.scala:33:7] input clock, // @[SourceB.scala:33:7] input reset, // @[SourceB.scala:33:7] output io_req_ready, // @[SourceB.scala:35:14] input io_req_valid, // @[SourceB.scala:35:14] input [2:0] io_req_bits_param, // @[SourceB.scala:35:14] input [12:0] io_req_bits_tag, // @[SourceB.scala:35:14] input [9:0] io_req_bits_set, // @[SourceB.scala:35:14] input io_req_bits_clients, // @[SourceB.scala:35:14] input io_b_ready, // @[SourceB.scala:35:14] output io_b_valid, // @[SourceB.scala:35:14] output [1:0] io_b_bits_param, // @[SourceB.scala:35:14] output [31:0] io_b_bits_address // @[SourceB.scala:35:14] ); wire io_req_valid_0 = io_req_valid; // @[SourceB.scala:33:7] wire [2:0] io_req_bits_param_0 = io_req_bits_param; // @[SourceB.scala:33:7] wire [12:0] io_req_bits_tag_0 = io_req_bits_tag; // @[SourceB.scala:33:7] wire [9:0] io_req_bits_set_0 = io_req_bits_set; // @[SourceB.scala:33:7] wire io_req_bits_clients_0 = io_req_bits_clients; // @[SourceB.scala:33:7] wire io_b_ready_0 = io_b_ready; // @[SourceB.scala:33:7] wire _b_bits_address_base_T_2 = reset; // @[Parameters.scala:222:12] wire _b_bits_address_base_T_8 = reset; // @[Parameters.scala:222:12] wire _b_bits_address_base_T_14 = reset; // @[Parameters.scala:222:12] wire [2:0] io_b_bits_opcode = 3'h6; // @[SourceB.scala:33:7] wire [2:0] io_b_bits_size = 3'h6; // @[SourceB.scala:33:7] wire [2:0] b_bits_opcode = 3'h6; // @[SourceB.scala:65:17] wire [2:0] b_bits_size = 3'h6; // @[SourceB.scala:65:17] wire [6:0] io_b_bits_source = 7'h20; // @[SourceB.scala:33:7] wire [6:0] b_bits_source = 7'h20; // @[SourceB.scala:65:17] wire [15:0] io_b_bits_mask = 16'hFFFF; // @[SourceB.scala:33:7] wire [15:0] b_bits_mask = 16'hFFFF; // @[SourceB.scala:65:17] wire [15:0] _b_bits_mask_T = 16'hFFFF; // @[SourceB.scala:81:23] wire [127:0] io_b_bits_data = 128'h0; // @[SourceB.scala:33:7] wire [127:0] b_bits_data = 128'h0; // @[SourceB.scala:65:17] wire io_b_bits_corrupt = 1'h0; // @[SourceB.scala:33:7] wire b_bits_corrupt = 1'h0; // @[SourceB.scala:65:17] wire _b_bits_address_base_T = 1'h0; // @[Parameters.scala:222:15] wire _b_bits_address_base_T_4 = 1'h0; // @[Parameters.scala:222:12] wire _b_bits_address_base_T_6 = 1'h0; // @[Parameters.scala:222:15] wire _b_bits_address_base_T_10 = 1'h0; // @[Parameters.scala:222:12] wire _b_bits_address_base_T_12 = 1'h0; // @[Parameters.scala:222:15] wire _b_bits_address_base_T_16 = 1'h0; // @[Parameters.scala:222:12] wire [1:0] b_bits_address_hi_hi_hi_lo = 2'h0; // @[Parameters.scala:230:8] wire [5:0] b_bits_address_base_y_2 = 6'h0; // @[Parameters.scala:221:15] wire [5:0] _b_bits_address_base_T_17 = 6'h0; // @[Parameters.scala:223:6] wire _b_bits_address_base_T_1 = 1'h1; // @[Parameters.scala:222:24] wire _b_bits_address_base_T_7 = 1'h1; // @[Parameters.scala:222:24] wire _b_bits_address_base_T_13 = 1'h1; // @[Parameters.scala:222:24] wire _io_req_ready_T; // @[SourceB.scala:61:21] wire b_ready = io_b_ready_0; // @[SourceB.scala:33:7, :65:17] wire b_valid; // @[SourceB.scala:65:17] wire [1:0] b_bits_param; // @[SourceB.scala:65:17] wire [31:0] b_bits_address; // @[SourceB.scala:65:17] wire io_req_ready_0; // @[SourceB.scala:33:7] wire [1:0] io_b_bits_param_0; // @[SourceB.scala:33:7] wire [31:0] io_b_bits_address_0; // @[SourceB.scala:33:7] wire io_b_valid_0; // @[SourceB.scala:33:7] reg remain; // @[SourceB.scala:46:25] wire busy = remain; // @[SourceB.scala:46:25, :51:23] wire remain_set; // @[SourceB.scala:47:30] wire remain_clr; // @[SourceB.scala:48:30] wire _remain_T = remain | remain_set; 
// @[SourceB.scala:46:25, :47:30, :49:23] wire _remain_T_1 = ~remain_clr; // @[SourceB.scala:48:30, :49:39] wire _remain_T_2 = _remain_T & _remain_T_1; // @[SourceB.scala:49:{23,37,39}] wire todo = busy ? remain : io_req_bits_clients_0; // @[SourceB.scala:33:7, :46:25, :51:23, :52:19] wire _next_T = todo; // @[package.scala:254:17] wire [1:0] _next_T_1 = {_next_T, 1'h0}; // @[package.scala:254:17] wire [1:0] _next_T_2 = ~_next_T_1; // @[SourceB.scala:53:{16,31}] wire [1:0] next = {1'h0, _next_T_2[0] & todo}; // @[SourceB.scala:52:19, :53:{16,37}]
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
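// The registers updated here and just below keep one "lane" of bookkeeping per source ID:
// inflight holds a single bit per source (set via UIntToOH on the first A beat of a request,
// cleared by the matching D beat), while inflight_opcodes / inflight_sizes hold
// (opcode << 1) | 1 and (size << 1) | 1 in a lane that is a_opcode_bus_size / a_size_bus_size
// bits wide, so an all-zero lane means "no request outstanding".
// Worked example with illustrative numbers (assuming edge.bundle.sizeBits == 3, so
// a_size_bus_size == 4 and log_a_size_bus_size == 2): an A request with source = 2 and
// size = 5 writes (5 << 1) | 1 = 0xB into bits [11:8] of inflight_sizes (lane offset 2 << 2 = 8).
// a_size_lookup reverses this for the D response: shift right by (d.source << 2), mask to
// 4 bits, then drop the occupancy bit with the final >> 1, recovering size = 5.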
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Bundles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import freechips.rocketchip.util._ import scala.collection.immutable.ListMap import chisel3.util.Decoupled import chisel3.util.DecoupledIO import chisel3.reflect.DataMirror abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle // common combos in lazy policy: // Put + Acquire // Release + AccessAck object TLMessages { // A B C D E def PutFullData = 0.U // . . => AccessAck def PutPartialData = 1.U // . . => AccessAck def ArithmeticData = 2.U // . . => AccessAckData def LogicalData = 3.U // . . => AccessAckData def Get = 4.U // . . => AccessAckData def Hint = 5.U // . . => HintAck def AcquireBlock = 6.U // . => Grant[Data] def AcquirePerm = 7.U // . => Grant[Data] def Probe = 6.U // . => ProbeAck[Data] def AccessAck = 0.U // . . def AccessAckData = 1.U // . . def HintAck = 2.U // . . def ProbeAck = 4.U // . def ProbeAckData = 5.U // . def Release = 6.U // . => ReleaseAck def ReleaseData = 7.U // . => ReleaseAck def Grant = 4.U // . => GrantAck def GrantData = 5.U // . => GrantAck def ReleaseAck = 6.U // . def GrantAck = 0.U // . def isA(x: UInt) = x <= AcquirePerm def isB(x: UInt) = x <= Probe def isC(x: UInt) = x <= ReleaseData def isD(x: UInt) = x <= ReleaseAck def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant) def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck) def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("AcquireBlock",TLPermissions.PermMsgGrow), ("AcquirePerm",TLPermissions.PermMsgGrow)) def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("Probe",TLPermissions.PermMsgCap)) def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("ProbeAck",TLPermissions.PermMsgReport), ("ProbeAckData",TLPermissions.PermMsgReport), ("Release",TLPermissions.PermMsgReport), ("ReleaseData",TLPermissions.PermMsgReport)) def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("Grant",TLPermissions.PermMsgCap), ("GrantData",TLPermissions.PermMsgCap), ("ReleaseAck",TLPermissions.PermMsgReserved)) } /** * The three primary TileLink permissions are: * (T)runk: the agent is (or is on inwards path to) the global point of 
serialization. * (B)ranch: the agent is on an outwards path to * (N)one: * These permissions are permuted by transfer operations in various ways. * Operations can cap permissions, request for them to be grown or shrunk, * or for a report on their current status. */ object TLPermissions { val aWidth = 2 val bdWidth = 2 val cWidth = 3 // Cap types (Grant = new permissions, Probe = permisions <= target) def toT = 0.U(bdWidth.W) def toB = 1.U(bdWidth.W) def toN = 2.U(bdWidth.W) def isCap(x: UInt) = x <= toN // Grow types (Acquire = permissions >= target) def NtoB = 0.U(aWidth.W) def NtoT = 1.U(aWidth.W) def BtoT = 2.U(aWidth.W) def isGrow(x: UInt) = x <= BtoT // Shrink types (ProbeAck, Release) def TtoB = 0.U(cWidth.W) def TtoN = 1.U(cWidth.W) def BtoN = 2.U(cWidth.W) def isShrink(x: UInt) = x <= BtoN // Report types (ProbeAck, Release) def TtoT = 3.U(cWidth.W) def BtoB = 4.U(cWidth.W) def NtoN = 5.U(cWidth.W) def isReport(x: UInt) = x <= NtoN def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT") def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN") def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TotT", "Report BtoB", "Report NtoN") def PermMsgReserved:Seq[String] = Seq("Reserved") } object TLAtomics { val width = 3 // Arithmetic types def MIN = 0.U(width.W) def MAX = 1.U(width.W) def MINU = 2.U(width.W) def MAXU = 3.U(width.W) def ADD = 4.U(width.W) def isArithmetic(x: UInt) = x <= ADD // Logical types def XOR = 0.U(width.W) def OR = 1.U(width.W) def AND = 2.U(width.W) def SWAP = 3.U(width.W) def isLogical(x: UInt) = x <= SWAP def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD") def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP") } object TLHints { val width = 1 def PREFETCH_READ = 0.U(width.W) def PREFETCH_WRITE = 1.U(width.W) def isHints(x: UInt) = x <= PREFETCH_WRITE def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite") } sealed trait TLChannel extends TLBundleBase { val channelName: String } sealed trait TLDataChannel extends TLChannel sealed trait TLAddrChannel extends TLDataChannel final class TLBundleA(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleA_${params.shortName}" val channelName = "'A' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleB(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleB_${params.shortName}" val channelName = "'B' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val address = UInt(params.addressBits.W) // from // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleC(params: TLBundleParameters) extends TLBundleBase(params) with 
TLAddrChannel { override def typeName = s"TLBundleC_${params.shortName}" val channelName = "'C' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.cWidth.W) // shrink or report perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleD(params: TLBundleParameters) extends TLBundleBase(params) with TLDataChannel { override def typeName = s"TLBundleD_${params.shortName}" val channelName = "'D' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val sink = UInt(params.sinkBits.W) // from val denied = Bool() // implies corrupt iff *Data val user = BundleMap(params.responseFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleE(params: TLBundleParameters) extends TLBundleBase(params) with TLChannel { override def typeName = s"TLBundleE_${params.shortName}" val channelName = "'E' channel" val sink = UInt(params.sinkBits.W) // to } class TLBundle(val params: TLBundleParameters) extends Record { // Emulate a Bundle with elements abcde or ad depending on params.hasBCE private val optA = Some (Decoupled(new TLBundleA(params))) private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params)))) private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params))) private val optD = Some (Flipped(Decoupled(new TLBundleD(params)))) private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params))) def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params))))) def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params))))) def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params))))) def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params))))) def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params))))) val elements = if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a) else ListMap("d" -> d, "a" -> a) def tieoff(): Unit = { DataMirror.specifiedDirectionOf(a.ready) match { case SpecifiedDirection.Input => a.ready := false.B c.ready := false.B e.ready := false.B b.valid := false.B d.valid := false.B case SpecifiedDirection.Output => a.valid := false.B c.valid := false.B e.valid := false.B b.ready := false.B d.ready := false.B case _ => } } } object TLBundle { def apply(params: TLBundleParameters) = new TLBundle(params) } class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params) { val a = new AsyncBundle(new TLBundleA(params.base), params.async) val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async)) val c = new AsyncBundle(new TLBundleC(params.base), params.async) val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async)) val e = new AsyncBundle(new 
TLBundleE(params.base), params.async) } class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = RationalIO(new TLBundleA(params)) val b = Flipped(RationalIO(new TLBundleB(params))) val c = RationalIO(new TLBundleC(params)) val d = Flipped(RationalIO(new TLBundleD(params))) val e = RationalIO(new TLBundleE(params)) } class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = CreditedIO(new TLBundleA(params)) val b = Flipped(CreditedIO(new TLBundleB(params))) val c = CreditedIO(new TLBundleC(params)) val d = Flipped(CreditedIO(new TLBundleD(params))) val e = CreditedIO(new TLBundleE(params)) } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // An potentially empty inclusive range of 2-powers 
[min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
  def this(x: Int) = this(x, x)

  require (min <= max, s"Min transfer $min > max transfer $max")
  require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
  require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
  require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
  require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")

  def none = min == 0
  def contains(x: Int) = isPow2(x) && min <= x && x <= max
  def containsLg(x: Int) = contains(1 << x)
  def containsLg(x: UInt) =
    if (none) false.B
    else if (min == max) { log2Ceil(min).U === x }
    else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }

  def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)

  def intersect(x: TransferSizes) =
    if (x.max < min || max < x.min) TransferSizes.none
    else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))

  // Not a union, because the result may contain sizes contained by neither term
  // NOT TO BE CONFUSED WITH COVERPOINTS
  def mincover(x: TransferSizes) = {
    if (none) {
      x
    } else if (x.none) {
      this
    } else {
      TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
    }
  }

  override def toString() = "TransferSizes[%d, %d]".format(min, max)
}

object TransferSizes {
  def apply(x: Int) = new TransferSizes(x)
  val none = new TransferSizes(0)

  def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
  def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)

  implicit def asBool(x: TransferSizes) = !x.none
}

// AddressSets specify the address space managed by the manager
// Base is the base address, and mask gives the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
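// A quick worked check of the second example (the two probe addresses are chosen purely for
// illustration): for AddressSet(0x1000, 0xf0f), contains(0x1104) is true because
// (0x1104 ^ 0x1000) & ~0xf0f == 0x104 & ~0xf0f == 0 (bits 2 and 8 are both covered by the mask),
// whereas contains(0x1010) is false because bit 4 lies outside the mask and differs from the base,
// leaving a nonzero residue. The contains(x: BigInt) check below is exactly ((x ^ base) & ~mask) == 0.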
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
        // Do there exist A messages without Data?
        val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
        // Statically optimize the case where hasData is a constant
        if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
      }
      case _:TLBundleB => {
        // Do there exist B messages with Data?
        val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
        // Do there exist B messages without Data?
        val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
        // Statically optimize the case where hasData is a constant
        if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
      }
      case _:TLBundleC => {
        // Do there exist C messages with Data?
        val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
        // Do there exist C messages without Data?
        val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
        if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
      }
      case _:TLBundleD => {
        // Do there exist D messages with Data?
        val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
        // Do there exist D messages without Data?
        val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
        if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
      }
      case _:TLBundleE => Some(false)
    }
  }

  def isRequest(x: TLChannel): Bool = {
    x match {
      case a: TLBundleA => true.B
      case b: TLBundleB => true.B
      case c: TLBundleC => c.opcode(2) && c.opcode(1)
        // opcode === TLMessages.Release ||
        // opcode === TLMessages.ReleaseData
      case d: TLBundleD => d.opcode(2) && !d.opcode(1)
        // opcode === TLMessages.Grant ||
        // opcode === TLMessages.GrantData
      case e: TLBundleE => false.B
    }
  }

  def isResponse(x: TLChannel): Bool = {
    x match {
      case a: TLBundleA => false.B
      case b: TLBundleB => false.B
      case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
        // opcode =/= TLMessages.Release &&
        // opcode =/= TLMessages.ReleaseData
      case d: TLBundleD => true.B // Grant isResponse + isRequest
      case e: TLBundleE => true.B
    }
  }

  def hasData(x: TLChannel): Bool = {
    val opdata = x match {
      case a: TLBundleA => !a.opcode(2)
        // opcode === TLMessages.PutFullData ||
        // opcode === TLMessages.PutPartialData ||
        // opcode === TLMessages.ArithmeticData ||
        // opcode === TLMessages.LogicalData
      case b: TLBundleB => !b.opcode(2)
        // opcode === TLMessages.PutFullData ||
        // opcode === TLMessages.PutPartialData ||
        // opcode === TLMessages.ArithmeticData ||
        // opcode === TLMessages.LogicalData
      case c: TLBundleC => c.opcode(0)
        // opcode === TLMessages.AccessAckData ||
        // opcode === TLMessages.ProbeAckData ||
        // opcode === TLMessages.ReleaseData
      case d: TLBundleD => d.opcode(0)
        // opcode === TLMessages.AccessAckData ||
        // opcode === TLMessages.GrantData
      case e: TLBundleE => false.B
    }
    staticHasData(x).map(_.B).getOrElse(opdata)
  }

  def opcode(x: TLDataChannel): UInt = {
    x match {
      case a: TLBundleA => a.opcode
      case b: TLBundleB => b.opcode
      case c: TLBundleC => c.opcode
      case d: TLBundleD => d.opcode
    }
  }

  def param(x: TLDataChannel): UInt = {
    x match {
      case a: TLBundleA => a.param
      case b: TLBundleB => b.param
      case c: TLBundleC => c.param
      case d: TLBundleD => 
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := 
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
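The edge helpers above construct channel payloads; the burst bookkeeping (numBeats1, firstlastHelper) is what the monitor below relies on to track first/last/done across multi-beat messages. The following is a minimal, software-only sketch of that beat arithmetic; the object name and beatBytes = 8 are assumptions for illustration and are not part of the files above.

object BeatMathSketch extends App {
  val beatBytes = 8 // assumed bus width in bytes for this sketch
  def log2Ceil(x: Int): Int = 32 - Integer.numberOfLeadingZeros(x - 1)
  // Mirrors numBeats1: UIntToOH1(lgSize, maxLgSize) >> log2Ceil(beatBytes),
  // i.e. the number of beats in a data burst, minus one.
  def numBeats1(lgSize: Int): Int = ((1 << lgSize) - 1) >> log2Ceil(beatBytes)

  assert(numBeats1(3) == 0) // an 8-byte Put is a single beat, so `last` is true on the first beat
  assert(numBeats1(5) == 3) // a 32-byte Put is 4 beats; firstlastHelper's counter steps 0 -> 3 -> 2 -> 1 -> 0
  println("beat arithmetic sketch OK")
}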
module TLMonitor_24( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [6:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [25:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [6:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input io_in_d_bits_sink, // @[Monitor.scala:20:14] input io_in_d_bits_denied, // @[Monitor.scala:20:14] input [63:0] io_in_d_bits_data, // @[Monitor.scala:20:14] input io_in_d_bits_corrupt // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7] wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7] wire [6:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7] wire [25:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7] wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7] wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7] wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7] wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7] wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7] wire [6:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7] wire io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7] wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7] wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7] wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7] wire sink_ok = 1'h0; // @[Monitor.scala:309:31] wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire 
_c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35] wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36] wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25] wire c_first_done = 1'h0; // @[Edges.scala:233:22] wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47] wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_3_ready = 
1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95] wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71] wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44] wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36] wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51] wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40] wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55] wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88] wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59] wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14] wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27] wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25] wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21] wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] 
_c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // 
@[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61] wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_27 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_44 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_46 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_50 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_52 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_56 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_58 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_62 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_64 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_68 = 1'h1; // @[Parameters.scala:56:32] wire c_first = 1'h1; // @[Edges.scala:231:25] wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire c_first_last = 1'h1; // @[Edges.scala:232:33] wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28] wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28] wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // 
@[Bundles.scala:265:61] wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [25:0] _c_first_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _c_first_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [25:0] _c_first_WIRE_2_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _c_first_WIRE_3_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [25:0] _c_set_wo_ready_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _c_set_wo_ready_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [25:0] _c_set_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _c_set_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [25:0] _c_opcodes_set_interm_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _c_opcodes_set_interm_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [25:0] _c_sizes_set_interm_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _c_sizes_set_interm_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [25:0] _c_opcodes_set_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _c_opcodes_set_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [25:0] _c_sizes_set_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _c_sizes_set_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [25:0] _c_probe_ack_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _c_probe_ack_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [25:0] _c_probe_ack_WIRE_2_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _c_probe_ack_WIRE_3_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [25:0] _same_cycle_resp_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _same_cycle_resp_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [25:0] _same_cycle_resp_WIRE_2_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _same_cycle_resp_WIRE_3_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [25:0] _same_cycle_resp_WIRE_4_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _same_cycle_resp_WIRE_5_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [6:0] _c_first_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_first_WIRE_1_bits_source = 7'h0; // 
@[Bundles.scala:265:61] wire [6:0] _c_first_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_first_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_set_wo_ready_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_set_wo_ready_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_opcodes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_opcodes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_sizes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_sizes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_opcodes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_opcodes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_sizes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_sizes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_probe_ack_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_probe_ack_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_probe_ack_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_probe_ack_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _same_cycle_resp_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _same_cycle_resp_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _same_cycle_resp_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _same_cycle_resp_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _same_cycle_resp_WIRE_4_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _same_cycle_resp_WIRE_5_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire 
[1026:0] _c_opcodes_set_T_1 = 1027'h0; // @[Monitor.scala:767:54] wire [1026:0] _c_sizes_set_T_1 = 1027'h0; // @[Monitor.scala:768:52] wire [9:0] _c_opcodes_set_T = 10'h0; // @[Monitor.scala:767:79] wire [9:0] _c_sizes_set_T = 10'h0; // @[Monitor.scala:768:77] wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61] wire [3:0] _c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59] wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40] wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40] wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53] wire [3:0] _c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51] wire [127:0] _c_set_wo_ready_T = 128'h1; // @[OneHot.scala:58:35] wire [127:0] _c_set_T = 128'h1; // @[OneHot.scala:58:35] wire [259:0] c_opcodes_set = 260'h0; // @[Monitor.scala:740:34] wire [259:0] c_sizes_set = 260'h0; // @[Monitor.scala:741:34] wire [64:0] c_set = 65'h0; // @[Monitor.scala:738:34] wire [64:0] c_set_wo_ready = 65'h0; // @[Monitor.scala:739:34] wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46] wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76] wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71] wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42] wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42] wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42] wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123] wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117] wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48] wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48] wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123] wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119] wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48] wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48] wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34] wire [6:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // 
@[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_36 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_37 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_38 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_39 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_40 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_41 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_42 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_43 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_44 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_45 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_46 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_47 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_48 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_49 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] 
_uncommonBits_T_50 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_51 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_52 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_53 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_54 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_5 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_8 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_9 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire _source_ok_T = io_in_a_bits_source_0 == 7'h10; // @[Monitor.scala:36:7] wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] _source_ok_T_1 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_7 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_13 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_19 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire _source_ok_T_2 = _source_ok_T_1 == 5'h0; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_8 = _source_ok_T_7 == 5'h1; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_14 = _source_ok_T_13 == 5'h2; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_20 = _source_ok_T_19 == 5'h3; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31] wire [2:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[2:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] _source_ok_T_25 = io_in_a_bits_source_0[6:3]; // @[Monitor.scala:36:7] wire _source_ok_T_26 = _source_ok_T_25 == 4'h4; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_28 = _source_ok_T_26; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_29 = source_ok_uncommonBits_4 < 3'h5; // @[Parameters.scala:52:56, :57:20] wire _source_ok_T_30 = _source_ok_T_28 & _source_ok_T_29; // @[Parameters.scala:54:67, :56:48, :57:20] wire _source_ok_WIRE_5 = _source_ok_T_30; // @[Parameters.scala:1138:31] 
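  // source_ok below ORs one term per visible client id range: the exact ids 7'h10,
  // 7'h25, 7'h28 and 7'h40, the four blocks 7'h00-7'h03, 7'h04-7'h07, 7'h08-7'h0B and
  // 7'h0C-7'h0F decoded from io_in_a_bits_source[6:2], and 7'h20-7'h24 decoded from
  // io_in_a_bits_source[6:3] == 4'h4 with the low bits restricted to < 3'h5.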
wire _source_ok_T_31 = io_in_a_bits_source_0 == 7'h25; // @[Monitor.scala:36:7] wire _source_ok_WIRE_6 = _source_ok_T_31; // @[Parameters.scala:1138:31] wire _source_ok_T_32 = io_in_a_bits_source_0 == 7'h28; // @[Monitor.scala:36:7] wire _source_ok_WIRE_7 = _source_ok_T_32; // @[Parameters.scala:1138:31] wire _source_ok_T_33 = io_in_a_bits_source_0 == 7'h40; // @[Monitor.scala:36:7] wire _source_ok_WIRE_8 = _source_ok_T_33; // @[Parameters.scala:1138:31] wire _source_ok_T_34 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_35 = _source_ok_T_34 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_36 = _source_ok_T_35 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_37 = _source_ok_T_36 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_38 = _source_ok_T_37 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_39 = _source_ok_T_38 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_40 = _source_ok_T_39 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46] wire source_ok = _source_ok_T_40 | _source_ok_WIRE_8; // @[Parameters.scala:1138:31, :1139:46] wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71] wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71] assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71] wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71] assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71] wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71] assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71] wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}] wire [25:0] _is_aligned_T = {20'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46] wire is_aligned = _is_aligned_T == 26'h0; // @[Edges.scala:21:{16,24}] wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49] wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}] wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27] wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21] wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26] wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26] wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}] wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}] wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26] wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26] wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_0_2 = 
mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}] wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26] wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26] wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20] wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}] wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}] wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}] wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}] wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}] wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10] wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10] 
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10] wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10] wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_4 = _uncommonBits_T_4[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_5 = _uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_6 = _uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_7 = _uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_9 = _uncommonBits_T_9[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_10 = _uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_11 = _uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_12 = _uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_13 = _uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_14 = _uncommonBits_T_14[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_16 = _uncommonBits_T_16[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_17 = _uncommonBits_T_17[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_18 = _uncommonBits_T_18[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_19 = _uncommonBits_T_19[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_24 = _uncommonBits_T_24[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_25 = _uncommonBits_T_25[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_28 = _uncommonBits_T_28[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_29 = _uncommonBits_T_29[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_30 = _uncommonBits_T_30[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_31 = _uncommonBits_T_31[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_34 = _uncommonBits_T_34[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_36 = _uncommonBits_T_36[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_37 = _uncommonBits_T_37[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_38 = _uncommonBits_T_38[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] 
uncommonBits_39 = _uncommonBits_T_39[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_40 = _uncommonBits_T_40[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_41 = _uncommonBits_T_41[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_42 = _uncommonBits_T_42[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_43 = _uncommonBits_T_43[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_44 = _uncommonBits_T_44[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_45 = _uncommonBits_T_45[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_46 = _uncommonBits_T_46[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_47 = _uncommonBits_T_47[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_48 = _uncommonBits_T_48[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_49 = _uncommonBits_T_49[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_50 = _uncommonBits_T_50[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_51 = _uncommonBits_T_51[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_52 = _uncommonBits_T_52[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_53 = _uncommonBits_T_53[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_54 = _uncommonBits_T_54[2:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_41 = io_in_d_bits_source_0 == 7'h10; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_0 = _source_ok_T_41; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] _source_ok_T_42 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_48 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_54 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_60 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire _source_ok_T_43 = _source_ok_T_42 == 5'h0; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_45 = _source_ok_T_43; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_47 = _source_ok_T_45; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_1 = _source_ok_T_47; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_49 = _source_ok_T_48 == 5'h1; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_51 = _source_ok_T_49; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_53 = _source_ok_T_51; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_2 = _source_ok_T_53; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_55 = _source_ok_T_54 == 5'h2; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_57 = _source_ok_T_55; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_59 = _source_ok_T_57; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_3 = _source_ok_T_59; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_8 = _source_ok_uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_61 = _source_ok_T_60 == 5'h3; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_63 = _source_ok_T_61; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_65 = _source_ok_T_63; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_4 = _source_ok_T_65; // @[Parameters.scala:1138:31] wire [2:0] 
source_ok_uncommonBits_9 = _source_ok_uncommonBits_T_9[2:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] _source_ok_T_66 = io_in_d_bits_source_0[6:3]; // @[Monitor.scala:36:7] wire _source_ok_T_67 = _source_ok_T_66 == 4'h4; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_69 = _source_ok_T_67; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_70 = source_ok_uncommonBits_9 < 3'h5; // @[Parameters.scala:52:56, :57:20] wire _source_ok_T_71 = _source_ok_T_69 & _source_ok_T_70; // @[Parameters.scala:54:67, :56:48, :57:20] wire _source_ok_WIRE_1_5 = _source_ok_T_71; // @[Parameters.scala:1138:31] wire _source_ok_T_72 = io_in_d_bits_source_0 == 7'h25; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_6 = _source_ok_T_72; // @[Parameters.scala:1138:31] wire _source_ok_T_73 = io_in_d_bits_source_0 == 7'h28; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_7 = _source_ok_T_73; // @[Parameters.scala:1138:31] wire _source_ok_T_74 = io_in_d_bits_source_0 == 7'h40; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_8 = _source_ok_T_74; // @[Parameters.scala:1138:31] wire _source_ok_T_75 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_76 = _source_ok_T_75 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_77 = _source_ok_T_76 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_78 = _source_ok_T_77 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_79 = _source_ok_T_78 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_80 = _source_ok_T_79 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_81 = _source_ok_T_80 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46] wire source_ok_1 = _source_ok_T_81 | _source_ok_WIRE_1_8; // @[Parameters.scala:1138:31, :1139:46] wire _T_1156 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35] wire _a_first_T; // @[Decoupled.scala:51:35] assign _a_first_T = _T_1156; // @[Decoupled.scala:51:35] wire _a_first_T_1; // @[Decoupled.scala:51:35] assign _a_first_T_1 = _T_1156; // @[Decoupled.scala:51:35] wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46] wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}] wire [2:0] a_first_beats1 = a_first_beats1_opdata ? 
a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [2:0] a_first_counter; // @[Edges.scala:229:27] wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28] wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35] wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27] wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [2:0] size; // @[Monitor.scala:389:22] reg [6:0] source; // @[Monitor.scala:390:22] reg [25:0] address; // @[Monitor.scala:391:22] wire _T_1229 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35] wire _d_first_T; // @[Decoupled.scala:51:35] assign _d_first_T = _T_1229; // @[Decoupled.scala:51:35] wire _d_first_T_1; // @[Decoupled.scala:51:35] assign _d_first_T_1 = _T_1229; // @[Decoupled.scala:51:35] wire _d_first_T_2; // @[Decoupled.scala:51:35] assign _d_first_T_2 = _T_1229; // @[Decoupled.scala:51:35] wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71] assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71] assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71] assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71] wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46] wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire [2:0] d_first_beats1 = d_first_beats1_opdata ? 
d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28] wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] param_1; // @[Monitor.scala:539:22] reg [2:0] size_1; // @[Monitor.scala:540:22] reg [6:0] source_1; // @[Monitor.scala:541:22] reg sink; // @[Monitor.scala:542:22] reg denied; // @[Monitor.scala:543:22] reg [64:0] inflight; // @[Monitor.scala:614:27] reg [259:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [259:0] inflight_sizes; // @[Monitor.scala:618:33] wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46] wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}] wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [2:0] a_first_counter_1; // @[Edges.scala:229:27] wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28] wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46] wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? 
d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter_1; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28] wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [64:0] a_set; // @[Monitor.scala:626:34] wire [64:0] a_set_wo_ready; // @[Monitor.scala:627:34] wire [259:0] a_opcodes_set; // @[Monitor.scala:630:33] wire [259:0] a_sizes_set; // @[Monitor.scala:632:31] wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35] wire [9:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69] wire [9:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69] assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69] wire [9:0] _a_size_lookup_T; // @[Monitor.scala:641:65] assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65] wire [9:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101] assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101] wire [9:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99] assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99] wire [9:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69] assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69] wire [9:0] _c_size_lookup_T; // @[Monitor.scala:750:67] assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67] wire [9:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101] assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101] wire [9:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99] assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99] wire [259:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}] wire [259:0] _a_opcode_lookup_T_6 = {256'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}] wire [259:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:637:{97,152}] assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}] wire [3:0] a_size_lookup; // @[Monitor.scala:639:33] wire [259:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}] wire [259:0] _a_size_lookup_T_6 = {256'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}] wire [259:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[259:1]}; // @[Monitor.scala:641:{91,144}] assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}] wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40] wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38] wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44] wire 
[127:0] _GEN_2 = 128'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35] wire [127:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35] assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35] wire [127:0] _a_set_T; // @[OneHot.scala:58:35] assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35] assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35] wire _T_1082 = _T_1156 & a_first_1; // @[Decoupled.scala:51:35] assign a_set = _T_1082 ? _a_set_T[64:0] : 65'h0; // @[OneHot.scala:58:35] wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53] wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}] assign a_opcodes_set_interm = _T_1082 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}] wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51] wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}] assign a_sizes_set_interm = _T_1082 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}] wire [9:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79] wire [9:0] _a_opcodes_set_T; // @[Monitor.scala:659:79] assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79] wire [9:0] _a_sizes_set_T; // @[Monitor.scala:660:77] assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77] wire [1026:0] _a_opcodes_set_T_1 = {1023'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}] assign a_opcodes_set = _T_1082 ? _a_opcodes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}] wire [1026:0] _a_sizes_set_T_1 = {1023'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}] assign a_sizes_set = _T_1082 ? _a_sizes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}] wire [64:0] d_clr; // @[Monitor.scala:664:34] wire [64:0] d_clr_wo_ready; // @[Monitor.scala:665:34] wire [259:0] d_opcodes_clr; // @[Monitor.scala:668:33] wire [259:0] d_sizes_clr; // @[Monitor.scala:670:31] wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46] wire d_release_ack; // @[Monitor.scala:673:46] assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46] wire d_release_ack_1; // @[Monitor.scala:783:46] assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46] wire _T_1128 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26] wire [127:0] _GEN_5 = 128'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35] wire [127:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35] wire [127:0] _d_clr_T; // @[OneHot.scala:58:35] assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35] wire [127:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35] wire [127:0] _d_clr_T_1; // @[OneHot.scala:58:35] assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35] assign d_clr_wo_ready = _T_1128 & ~d_release_ack ? _d_clr_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35] wire _T_1097 = _T_1229 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35] assign d_clr = _T_1097 ? _d_clr_T[64:0] : 65'h0; // @[OneHot.scala:58:35] wire [1038:0] _d_opcodes_clr_T_5 = 1039'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}] assign d_opcodes_clr = _T_1097 ? 
_d_opcodes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}] wire [1038:0] _d_sizes_clr_T_5 = 1039'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}] assign d_sizes_clr = _T_1097 ? _d_sizes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}] wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}] wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113] wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}] wire [64:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27] wire [64:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38] wire [64:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}] wire [259:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43] wire [259:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62] wire [259:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}] wire [259:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39] wire [259:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56] wire [259:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}] reg [31:0] watchdog; // @[Monitor.scala:709:27] wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26] wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26] reg [64:0] inflight_1; // @[Monitor.scala:726:35] wire [64:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35] reg [259:0] inflight_opcodes_1; // @[Monitor.scala:727:35] wire [259:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43] reg [259:0] inflight_sizes_1; // @[Monitor.scala:728:35] wire [259:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41] wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46] wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter_2; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28] wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}] wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T_2 = d_first_2 ? 
d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35] wire [3:0] c_size_lookup; // @[Monitor.scala:748:35] wire [259:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}] wire [259:0] _c_opcode_lookup_T_6 = {256'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}] wire [259:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:749:{97,152}] assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}] wire [259:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}] wire [259:0] _c_size_lookup_T_6 = {256'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}] wire [259:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[259:1]}; // @[Monitor.scala:750:{93,146}] assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}] wire [64:0] d_clr_1; // @[Monitor.scala:774:34] wire [64:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34] wire [259:0] d_opcodes_clr_1; // @[Monitor.scala:776:34] wire [259:0] d_sizes_clr_1; // @[Monitor.scala:777:34] wire _T_1200 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26] assign d_clr_wo_ready_1 = _T_1200 & d_release_ack_1 ? _d_clr_wo_ready_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35] wire _T_1182 = _T_1229 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35] assign d_clr_1 = _T_1182 ? _d_clr_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35] wire [1038:0] _d_opcodes_clr_T_11 = 1039'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}] assign d_opcodes_clr_1 = _T_1182 ? _d_opcodes_clr_T_11[259:0] : 260'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}] wire [1038:0] _d_sizes_clr_T_11 = 1039'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}] assign d_sizes_clr_1 = _T_1182 ? _d_sizes_clr_T_11[259:0] : 260'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}] wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 7'h0; // @[Monitor.scala:36:7, :795:113] wire [64:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46] wire [64:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}] wire [259:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62] wire [259:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}] wire [259:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58] wire [259:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
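For readers tracing the generated wires above: a hedged, simplified Chisel sketch of the first-beat bookkeeping that the a_first_counter / d_first_counter registers implement. The module and signal names below are invented for illustration; the real logic is the TileLink beat counter in Edges.scala, which the monitor reuses.

import chisel3._

class FirstBeatTracker(beatBits: Int = 3) extends Module {
  val io = IO(new Bundle {
    val fire   = Input(Bool())            // a beat is accepted this cycle
    val beats1 = Input(UInt(beatBits.W))  // (number of beats - 1) for this message
    val first  = Output(Bool())           // high on the first beat of a message
  })
  val counter = RegInit(0.U(beatBits.W))
  io.first := counter === 0.U
  when (io.fire) {
    // Reload with beats-1 on the first beat, otherwise count down, mirroring
    // `_a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1` above.
    counter := Mux(io.first, io.beats1, counter - 1.U)
  }
}

On the first beat the counter is reloaded with beats-1 and then counts down across the remaining beats, so `first` is asserted exactly on message boundaries.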
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
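As a usage illustration (not part of the dataset prompt), a minimal Chisel module showing how the synchronizer helpers listed above are typically applied. AsyncInputSync and its port names are invented for the example; the depth and init values are chosen to match the generated module that follows.

import chisel3._
import freechips.rocketchip.util._

class AsyncInputSync extends Module {
  val io = IO(new Bundle {
    val async_in = Input(Bool())   // signal arriving from another clock domain
    val sync_out = Output(Bool())  // synchronized into this module's domain
  })
  // 3-deep, asynchronously reset synchronizer chain with reset value 0
  io.sync_out := AsyncResetSynchronizerShiftReg(io.async_in, sync = 3, init = 0, name = Some("in_sync"))
}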
module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_144( // @[SynchronizerReg.scala:68:19]
  input  clock, // @[SynchronizerReg.scala:68:19]
  input  reset, // @[SynchronizerReg.scala:68:19]
  input  io_d, // @[ShiftReg.scala:36:14]
  output io_q // @[ShiftReg.scala:36:14]
);

  wire io_d_0 = io_d; // @[SynchronizerReg.scala:68:19]
  wire _sync_2_T = io_d_0; // @[SynchronizerReg.scala:54:22, :68:19]
  wire io_q_0; // @[SynchronizerReg.scala:68:19]
  reg  sync_0; // @[SynchronizerReg.scala:51:87]
  assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19]
  reg  sync_1; // @[SynchronizerReg.scala:51:87]
  reg  sync_2; // @[SynchronizerReg.scala:51:87]
  always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19]
    if (reset) begin // @[SynchronizerReg.scala:68:19]
      sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87]
      sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87]
      sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87]
    end
    else begin // @[SynchronizerReg.scala:68:19]
      sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87]
      sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87]
      sync_2 <= _sync_2_T; // @[SynchronizerReg.scala:51:87, :54:22]
    end
  end // always @(posedge, posedge)
  assign io_q = io_q_0; // @[SynchronizerReg.scala:68:19]
endmodule
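A brief note on the correspondence, added for clarity: the sync_0..sync_2 registers above are the three RegInit stages of SynchronizerPrimitiveShiftReg, and the `always @(posedge clock or posedge reset)` form comes from those registers living under an AsyncReset (the `withReset(reset.asAsyncReset)` wrapper in AsyncResetSynchronizerShiftReg). A minimal sketch of that idiom, assuming standard Chisel/firtool lowering; OneStageAsync is a made-up name:

import chisel3._

class OneStageAsync extends Module {
  val io = IO(new Bundle { val d = Input(Bool()); val q = Output(Bool()) })
  // A register built under an AsyncReset lowers to
  //   always @(posedge clock or posedge reset)
  // in the emitted Verilog, matching the module above.
  io.q := withReset(reset.asAsyncReset) { RegNext(io.d, false.B) }
}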
Generate the Verilog code corresponding to the following Chisel files. File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. 
*/ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class 
OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File ALU.scala: // See LICENSE.SiFive for license details. // See LICENSE.Berkeley for license details. 
package freechips.rocketchip.rocket

import chisel3._
import chisel3.util.{BitPat, Fill, Cat, Reverse, PriorityEncoderOH, PopCount, MuxLookup}

import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.tile.CoreModule
import freechips.rocketchip.util._

object ALU {
  val SZ_ALU_FN = 5
  def FN_X = BitPat("b?????")
  def FN_ADD = 0.U
  def FN_SL = 1.U
  def FN_SEQ = 2.U
  def FN_SNE = 3.U
  def FN_XOR = 4.U
  def FN_SR = 5.U
  def FN_OR = 6.U
  def FN_AND = 7.U
  def FN_CZEQZ = 8.U
  def FN_CZNEZ = 9.U
  def FN_SUB = 10.U
  def FN_SRA = 11.U
  def FN_SLT = 12.U
  def FN_SGE = 13.U
  def FN_SLTU = 14.U
  def FN_SGEU = 15.U
  def FN_UNARY = 16.U
  def FN_ROL = 17.U
  def FN_ROR = 18.U
  def FN_BEXT = 19.U
  def FN_ANDN = 24.U
  def FN_ORN = 25.U
  def FN_XNOR = 26.U
  def FN_MAX = 28.U
  def FN_MIN = 29.U
  def FN_MAXU = 30.U
  def FN_MINU = 31.U
  def FN_MAXMIN = BitPat("b111??")

  // Mul/div reuse some integer FNs
  def FN_DIV = FN_XOR
  def FN_DIVU = FN_SR
  def FN_REM = FN_OR
  def FN_REMU = FN_AND

  def FN_MUL = FN_ADD
  def FN_MULH = FN_SL
  def FN_MULHSU = FN_SEQ
  def FN_MULHU = FN_SNE

  def isMulFN(fn: UInt, cmp: UInt) = fn(1,0) === cmp(1,0)
  def isSub(cmd: UInt) = cmd(3)
  def isCmp(cmd: UInt) = (cmd >= FN_SLT && cmd <= FN_SGEU)
  def isMaxMin(cmd: UInt) = (cmd >= FN_MAX && cmd <= FN_MINU)
  def cmpUnsigned(cmd: UInt) = cmd(1)
  def cmpInverted(cmd: UInt) = cmd(0)
  def cmpEq(cmd: UInt) = !cmd(3)
  def shiftReverse(cmd: UInt) = !cmd.isOneOf(FN_SR, FN_SRA, FN_ROR, FN_BEXT)
  def bwInvRs2(cmd: UInt) = cmd.isOneOf(FN_ANDN, FN_ORN, FN_XNOR)
}

import ALU._

abstract class AbstractALU(implicit p: Parameters) extends CoreModule()(p) {
  val io = IO(new Bundle {
    val dw = Input(UInt(SZ_DW.W))
    val fn = Input(UInt(SZ_ALU_FN.W))
    val in2 = Input(UInt(xLen.W))
    val in1 = Input(UInt(xLen.W))
    val out = Output(UInt(xLen.W))
    val adder_out = Output(UInt(xLen.W))
    val cmp_out = Output(Bool())
  })
}

class ALU(implicit p: Parameters) extends AbstractALU()(p) {
  // ADD, SUB
  val in2_inv = Mux(isSub(io.fn), ~io.in2, io.in2)
  val in1_xor_in2 = io.in1 ^ in2_inv
  val in1_and_in2 = io.in1 & in2_inv
  io.adder_out := io.in1 + in2_inv + isSub(io.fn)

  // SLT, SLTU
  val slt = Mux(io.in1(xLen-1) === io.in2(xLen-1), io.adder_out(xLen-1),
            Mux(cmpUnsigned(io.fn), io.in2(xLen-1), io.in1(xLen-1)))
  io.cmp_out := cmpInverted(io.fn) ^ Mux(cmpEq(io.fn), in1_xor_in2 === 0.U, slt)

  // SLL, SRL, SRA
  val (shamt, shin_r) = if (xLen == 32) (io.in2(4,0), io.in1) else {
    require(xLen == 64)
    val shin_hi_32 = Fill(32, isSub(io.fn) && io.in1(31))
    val shin_hi = Mux(io.dw === DW_64, io.in1(63,32), shin_hi_32)
    val shamt = Cat(io.in2(5) & (io.dw === DW_64), io.in2(4,0))
    (shamt, Cat(shin_hi, io.in1(31,0)))
  }
  val shin = Mux(shiftReverse(io.fn), Reverse(shin_r), shin_r)
  val shout_r = (Cat(isSub(io.fn) & shin(xLen-1), shin).asSInt >> shamt)(xLen-1,0)
  val shout_l = Reverse(shout_r)
  val shout = Mux(io.fn === FN_SR || io.fn === FN_SRA || io.fn === FN_BEXT, shout_r, 0.U) |
              Mux(io.fn === FN_SL, shout_l, 0.U)

  // CZEQZ, CZNEZ
  val in2_not_zero = io.in2.orR
  val cond_out = Option.when(usingConditionalZero)(
    Mux((io.fn === FN_CZEQZ && in2_not_zero) || (io.fn === FN_CZNEZ && !in2_not_zero), io.in1, 0.U)
  )

  // AND, OR, XOR
  val logic = Mux(io.fn === FN_XOR || io.fn === FN_OR || io.fn === FN_ORN || io.fn === FN_XNOR, in1_xor_in2, 0.U) |
              Mux(io.fn === FN_OR || io.fn === FN_AND || io.fn === FN_ORN || io.fn === FN_ANDN, in1_and_in2, 0.U)

  val bext_mask = Mux(coreParams.useZbs.B && io.fn === FN_BEXT, 1.U, ~(0.U(xLen.W)))
  val shift_logic = (isCmp (io.fn) && slt) | logic | (shout & bext_mask)
  val shift_logic_cond = cond_out match {
    case Some(co) => shift_logic | co
    case _ => shift_logic
  }

  // CLZ, CTZ, CPOP
  val tz_in = MuxLookup((io.dw === DW_32) ## !io.in2(0), 0.U)(Seq(
    0.U -> io.in1,
    1.U -> Reverse(io.in1),
    2.U -> 1.U ## io.in1(31,0),
    3.U -> 1.U ## Reverse(io.in1(31,0))
  ))
  val popc_in = Mux(io.in2(1), Mux(io.dw === DW_32, io.in1(31,0), io.in1), PriorityEncoderOH(1.U ## tz_in) - 1.U)(xLen-1,0)
  val count = PopCount(popc_in)
  val in1_bytes = io.in1.asTypeOf(Vec(xLen / 8, UInt(8.W)))
  val orcb = VecInit(in1_bytes.map(b => Fill(8, b =/= 0.U))).asUInt
  val rev8 = VecInit(in1_bytes.reverse).asUInt
  val unary = MuxLookup(io.in2(11,0), count)(Seq(
    0x287.U -> orcb,
    (if (xLen == 32) 0x698 else 0x6b8).U -> rev8,
    0x080.U -> io.in1(15,0),
    0x604.U -> Fill(xLen-8, io.in1(7)) ## io.in1(7,0),
    0x605.U -> Fill(xLen-16, io.in1(15)) ## io.in1(15,0)
  ))

  // MAX, MIN, MAXU, MINU
  val maxmin_out = Mux(io.cmp_out, io.in2, io.in1)

  // ROL, ROR
  val rot_shamt = Mux(io.dw === DW_32, 32.U, xLen.U) - shamt
  val rotin = Mux(io.fn(0), shin_r, Reverse(shin_r))
  val rotout_r = (rotin >> rot_shamt)(xLen-1,0)
  val rotout_l = Reverse(rotout_r)
  val rotout = Mux(io.fn(0), rotout_r, rotout_l) | Mux(io.fn(0), shout_l, shout_r)

  val out = MuxLookup(io.fn, shift_logic_cond)(Seq(
    FN_ADD -> io.adder_out,
    FN_SUB -> io.adder_out
  ) ++ (if (coreParams.useZbb) Seq(
    FN_UNARY -> unary,
    FN_MAX -> maxmin_out,
    FN_MIN -> maxmin_out,
    FN_MAXU -> maxmin_out,
    FN_MINU -> maxmin_out,
    FN_ROL -> rotout,
    FN_ROR -> rotout,
  ) else Nil))

  io.out := out

  if (xLen > 32) {
    require(xLen == 64)
    when (io.dw === DW_32) { io.out := Cat(Fill(32, out(31)), out(31,0)) }
  }
}
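Note on the FN_* encoding (explanatory sketch, not one of the input files): object ALU lays out the function codes so the datapath can test single bits of io.fn instead of fully decoding the five-bit opcode. Bit 3 selects subtraction and is shared by the compare operations, which reuse the adder, while bits 1 and 0 of a compare select its unsigned and inverted variants. The plain-Scala sketch below mirrors those predicates on Int literals; the object name and the Int-based helpers are illustrative assumptions only.

// Hypothetical stand-alone sketch of the ALU.FN_* encoding invariants.
// It mirrors isSub/isCmp/cmpUnsigned/cmpInverted on plain Ints; the real
// helpers above operate on Chisel UInts inside a hardware context.
object AluFnEncodingSketch extends App {
  val FN_ADD = 0;   val FN_SUB  = 10
  val FN_SLT = 12;  val FN_SGE  = 13
  val FN_SLTU = 14; val FN_SGEU = 15
  def bit(x: Int, i: Int): Boolean = ((x >> i) & 1) == 1
  def isSub(cmd: Int)       = bit(cmd, 3)                     // ADD vs SUB; every compare also subtracts
  def isCmp(cmd: Int)       = cmd >= FN_SLT && cmd <= FN_SGEU // compares occupy codes 12..15
  def cmpUnsigned(cmd: Int) = bit(cmd, 1)                     // SLTU/SGEU vs SLT/SGE
  def cmpInverted(cmd: Int) = bit(cmd, 0)                     // SGE/SGEU invert the less-than result
  assert(!isSub(FN_ADD) && isSub(FN_SUB))
  assert(Seq(FN_SLT, FN_SGE, FN_SLTU, FN_SGEU).forall(isCmp) && !isCmp(FN_SUB))
  assert(!cmpUnsigned(FN_SLT) && cmpUnsigned(FN_SLTU))
  assert(!cmpInverted(FN_SLT) && cmpInverted(FN_SGE))
  println("FN encoding invariants hold")
}

The same single-bit structure shows up in the generated module below, which reads io_fn[3], io_fn[1], and io_fn[0] directly (for in2_inv, slt, and io_cmp_out respectively) rather than decoding the whole function code.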
module ALU_2( // @[ALU.scala:83:7] input clock, // @[ALU.scala:83:7] input reset, // @[ALU.scala:83:7] input io_dw, // @[ALU.scala:72:14] input [4:0] io_fn, // @[ALU.scala:72:14] input [63:0] io_in2, // @[ALU.scala:72:14] input [63:0] io_in1, // @[ALU.scala:72:14] output [63:0] io_out // @[ALU.scala:72:14] ); wire [7:0] in1_bytes_6; // @[ALU.scala:140:34] wire [7:0] in1_bytes_5; // @[ALU.scala:140:34] wire [7:0] in1_bytes_4; // @[ALU.scala:140:34] wire [7:0] in1_bytes_3; // @[ALU.scala:140:34] wire [7:0] in1_bytes_2; // @[ALU.scala:140:34] wire [7:0] in1_bytes_1; // @[ALU.scala:140:34] wire [7:0] in1_bytes_0; // @[ALU.scala:140:34] wire io_dw_0 = io_dw; // @[ALU.scala:83:7] wire [4:0] io_fn_0 = io_fn; // @[ALU.scala:83:7] wire [63:0] io_in2_0 = io_in2; // @[ALU.scala:83:7] wire [63:0] io_in1_0 = io_in1; // @[ALU.scala:83:7] wire [63:0] _bext_mask_T_2 = 64'hFFFFFFFFFFFFFFFF; // @[ALU.scala:122:70] wire [31:0] _tz_in_T_67 = 32'hFFFF; // @[ALU.scala:134:26] wire [31:0] _tz_in_T_66 = 32'hFFFF0000; // @[ALU.scala:134:26] wire [31:0] _tz_in_T_72 = 32'hFFFF0000; // @[ALU.scala:134:26] wire [23:0] _tz_in_T_75 = 24'hFFFF; // @[ALU.scala:134:26] wire [31:0] _tz_in_T_76 = 32'hFFFF00; // @[ALU.scala:134:26] wire [31:0] _tz_in_T_77 = 32'hFF00FF; // @[ALU.scala:134:26] wire [31:0] _tz_in_T_82 = 32'hFF00FF00; // @[ALU.scala:134:26] wire [27:0] _tz_in_T_85 = 28'hFF00FF; // @[ALU.scala:134:26] wire [31:0] _tz_in_T_86 = 32'hFF00FF0; // @[ALU.scala:134:26] wire [31:0] _tz_in_T_87 = 32'hF0F0F0F; // @[ALU.scala:134:26] wire [31:0] _tz_in_T_92 = 32'hF0F0F0F0; // @[ALU.scala:134:26] wire [29:0] _tz_in_T_95 = 30'hF0F0F0F; // @[ALU.scala:134:26] wire [31:0] _tz_in_T_96 = 32'h3C3C3C3C; // @[ALU.scala:134:26] wire [31:0] _tz_in_T_97 = 32'h33333333; // @[ALU.scala:134:26] wire [31:0] _tz_in_T_102 = 32'hCCCCCCCC; // @[ALU.scala:134:26] wire [30:0] _tz_in_T_105 = 31'h33333333; // @[ALU.scala:134:26] wire [31:0] _tz_in_T_106 = 32'h66666666; // @[ALU.scala:134:26] wire [31:0] _tz_in_T_107 = 32'h55555555; // @[ALU.scala:134:26] wire [31:0] _tz_in_T_112 = 32'hAAAAAAAA; // @[ALU.scala:134:26] wire [63:0] _shin_T_9 = 64'hFFFFFFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shout_l_T_1 = 64'hFFFFFFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _tz_in_T_5 = 64'hFFFFFFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotin_T_2 = 64'hFFFFFFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotout_l_T_1 = 64'hFFFFFFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shin_T_8 = 64'hFFFFFFFF00000000; // @[ALU.scala:106:46] wire [63:0] _shin_T_14 = 64'hFFFFFFFF00000000; // @[ALU.scala:106:46] wire [63:0] _shout_l_T = 64'hFFFFFFFF00000000; // @[ALU.scala:108:24] wire [63:0] _shout_l_T_6 = 64'hFFFFFFFF00000000; // @[ALU.scala:108:24] wire [63:0] _tz_in_T_4 = 64'hFFFFFFFF00000000; // @[ALU.scala:132:19] wire [63:0] _tz_in_T_10 = 64'hFFFFFFFF00000000; // @[ALU.scala:132:19] wire [63:0] _rotin_T_1 = 64'hFFFFFFFF00000000; // @[ALU.scala:156:44] wire [63:0] _rotin_T_7 = 64'hFFFFFFFF00000000; // @[ALU.scala:156:44] wire [63:0] _rotout_l_T = 64'hFFFFFFFF00000000; // @[ALU.scala:158:25] wire [63:0] _rotout_l_T_6 = 64'hFFFFFFFF00000000; // @[ALU.scala:158:25] wire [47:0] _shin_T_17 = 48'hFFFFFFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [47:0] _shout_l_T_9 = 48'hFFFFFFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [47:0] _tz_in_T_13 = 48'hFFFFFFFF; // 
@[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [47:0] _rotin_T_10 = 48'hFFFFFFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [47:0] _rotout_l_T_9 = 48'hFFFFFFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shin_T_18 = 64'hFFFFFFFF0000; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shout_l_T_10 = 64'hFFFFFFFF0000; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _tz_in_T_14 = 64'hFFFFFFFF0000; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotin_T_11 = 64'hFFFFFFFF0000; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotout_l_T_10 = 64'hFFFFFFFF0000; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shin_T_19 = 64'hFFFF0000FFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shout_l_T_11 = 64'hFFFF0000FFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _tz_in_T_15 = 64'hFFFF0000FFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotin_T_12 = 64'hFFFF0000FFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotout_l_T_11 = 64'hFFFF0000FFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shin_T_24 = 64'hFFFF0000FFFF0000; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shout_l_T_16 = 64'hFFFF0000FFFF0000; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _tz_in_T_20 = 64'hFFFF0000FFFF0000; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotin_T_17 = 64'hFFFF0000FFFF0000; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotout_l_T_16 = 64'hFFFF0000FFFF0000; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [55:0] _shin_T_27 = 56'hFFFF0000FFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [55:0] _shout_l_T_19 = 56'hFFFF0000FFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [55:0] _tz_in_T_23 = 56'hFFFF0000FFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [55:0] _rotin_T_20 = 56'hFFFF0000FFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [55:0] _rotout_l_T_19 = 56'hFFFF0000FFFF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shin_T_28 = 64'hFFFF0000FFFF00; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shout_l_T_20 = 64'hFFFF0000FFFF00; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _tz_in_T_24 = 64'hFFFF0000FFFF00; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotin_T_21 = 64'hFFFF0000FFFF00; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotout_l_T_20 = 64'hFFFF0000FFFF00; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shin_T_29 = 64'hFF00FF00FF00FF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shout_l_T_21 = 64'hFF00FF00FF00FF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _tz_in_T_25 = 64'hFF00FF00FF00FF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotin_T_22 = 64'hFF00FF00FF00FF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotout_l_T_21 = 64'hFF00FF00FF00FF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shin_T_34 = 64'hFF00FF00FF00FF00; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shout_l_T_26 = 
64'hFF00FF00FF00FF00; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _tz_in_T_30 = 64'hFF00FF00FF00FF00; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotin_T_27 = 64'hFF00FF00FF00FF00; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotout_l_T_26 = 64'hFF00FF00FF00FF00; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [59:0] _shin_T_37 = 60'hFF00FF00FF00FF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [59:0] _shout_l_T_29 = 60'hFF00FF00FF00FF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [59:0] _tz_in_T_33 = 60'hFF00FF00FF00FF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [59:0] _rotin_T_30 = 60'hFF00FF00FF00FF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [59:0] _rotout_l_T_29 = 60'hFF00FF00FF00FF; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shin_T_38 = 64'hFF00FF00FF00FF0; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shout_l_T_30 = 64'hFF00FF00FF00FF0; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _tz_in_T_34 = 64'hFF00FF00FF00FF0; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotin_T_31 = 64'hFF00FF00FF00FF0; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotout_l_T_30 = 64'hFF00FF00FF00FF0; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shin_T_39 = 64'hF0F0F0F0F0F0F0F; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shout_l_T_31 = 64'hF0F0F0F0F0F0F0F; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _tz_in_T_35 = 64'hF0F0F0F0F0F0F0F; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotin_T_32 = 64'hF0F0F0F0F0F0F0F; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotout_l_T_31 = 64'hF0F0F0F0F0F0F0F; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shin_T_44 = 64'hF0F0F0F0F0F0F0F0; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shout_l_T_36 = 64'hF0F0F0F0F0F0F0F0; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _tz_in_T_40 = 64'hF0F0F0F0F0F0F0F0; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotin_T_37 = 64'hF0F0F0F0F0F0F0F0; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotout_l_T_36 = 64'hF0F0F0F0F0F0F0F0; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [61:0] _shin_T_47 = 62'hF0F0F0F0F0F0F0F; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [61:0] _shout_l_T_39 = 62'hF0F0F0F0F0F0F0F; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [61:0] _tz_in_T_43 = 62'hF0F0F0F0F0F0F0F; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [61:0] _rotin_T_40 = 62'hF0F0F0F0F0F0F0F; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [61:0] _rotout_l_T_39 = 62'hF0F0F0F0F0F0F0F; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shin_T_48 = 64'h3C3C3C3C3C3C3C3C; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shout_l_T_40 = 64'h3C3C3C3C3C3C3C3C; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _tz_in_T_44 = 64'h3C3C3C3C3C3C3C3C; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotin_T_41 = 64'h3C3C3C3C3C3C3C3C; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotout_l_T_40 = 64'h3C3C3C3C3C3C3C3C; 
// @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shin_T_49 = 64'h3333333333333333; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shout_l_T_41 = 64'h3333333333333333; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _tz_in_T_45 = 64'h3333333333333333; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotin_T_42 = 64'h3333333333333333; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotout_l_T_41 = 64'h3333333333333333; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shin_T_54 = 64'hCCCCCCCCCCCCCCCC; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shout_l_T_46 = 64'hCCCCCCCCCCCCCCCC; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _tz_in_T_50 = 64'hCCCCCCCCCCCCCCCC; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotin_T_47 = 64'hCCCCCCCCCCCCCCCC; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotout_l_T_46 = 64'hCCCCCCCCCCCCCCCC; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [62:0] _shin_T_57 = 63'h3333333333333333; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [62:0] _shout_l_T_49 = 63'h3333333333333333; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [62:0] _tz_in_T_53 = 63'h3333333333333333; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [62:0] _rotin_T_50 = 63'h3333333333333333; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [62:0] _rotout_l_T_49 = 63'h3333333333333333; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shin_T_58 = 64'h6666666666666666; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shout_l_T_50 = 64'h6666666666666666; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _tz_in_T_54 = 64'h6666666666666666; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotin_T_51 = 64'h6666666666666666; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotout_l_T_50 = 64'h6666666666666666; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shin_T_59 = 64'h5555555555555555; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shout_l_T_51 = 64'h5555555555555555; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _tz_in_T_55 = 64'h5555555555555555; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotin_T_52 = 64'h5555555555555555; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotout_l_T_51 = 64'h5555555555555555; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shin_T_64 = 64'hAAAAAAAAAAAAAAAA; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shout_l_T_56 = 64'hAAAAAAAAAAAAAAAA; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _tz_in_T_60 = 64'hAAAAAAAAAAAAAAAA; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotin_T_57 = 64'hAAAAAAAAAAAAAAAA; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotout_l_T_56 = 64'hAAAAAAAAAAAAAAAA; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire _shin_hi_T = io_dw_0; // @[ALU.scala:83:7, :102:31] wire _shamt_T_1 = io_dw_0; // @[ALU.scala:83:7, :103:42] wire [63:0] _in1_bytes_WIRE = io_in1_0; // @[ALU.scala:83:7, :140:34] wire [63:0] _io_adder_out_T_4; // @[ALU.scala:88:36] wire _io_cmp_out_T_5; // 
@[ALU.scala:94:36] wire [63:0] io_out_0; // @[ALU.scala:83:7] wire [63:0] io_adder_out; // @[ALU.scala:83:7] wire io_cmp_out; // @[ALU.scala:83:7] wire _in2_inv_T = io_fn_0[3]; // @[ALU.scala:58:29, :83:7] wire _io_adder_out_T_2 = io_fn_0[3]; // @[ALU.scala:58:29, :83:7] wire _io_cmp_out_T_1 = io_fn_0[3]; // @[ALU.scala:58:29, :63:30, :83:7] wire _shin_hi_32_T = io_fn_0[3]; // @[ALU.scala:58:29, :83:7] wire _shout_r_T = io_fn_0[3]; // @[ALU.scala:58:29, :83:7] wire [63:0] _in2_inv_T_1 = ~io_in2_0; // @[ALU.scala:83:7, :85:35] wire [63:0] in2_inv = _in2_inv_T ? _in2_inv_T_1 : io_in2_0; // @[ALU.scala:58:29, :83:7, :85:{20,35}] wire [63:0] in1_xor_in2 = io_in1_0 ^ in2_inv; // @[ALU.scala:83:7, :85:20, :86:28] wire [63:0] in1_and_in2 = io_in1_0 & in2_inv; // @[ALU.scala:83:7, :85:20, :87:28] wire [64:0] _io_adder_out_T = {1'h0, io_in1_0} + {1'h0, in2_inv}; // @[ALU.scala:83:7, :85:20, :88:26] wire [63:0] _io_adder_out_T_1 = _io_adder_out_T[63:0]; // @[ALU.scala:88:26] wire [64:0] _io_adder_out_T_3 = {1'h0, _io_adder_out_T_1} + {64'h0, _io_adder_out_T_2}; // @[ALU.scala:58:29, :88:{26,36}] assign _io_adder_out_T_4 = _io_adder_out_T_3[63:0]; // @[ALU.scala:88:36] assign io_adder_out = _io_adder_out_T_4; // @[ALU.scala:83:7, :88:36] wire _slt_T = io_in1_0[63]; // @[ALU.scala:83:7, :92:15] wire _slt_T_6 = io_in1_0[63]; // @[ALU.scala:83:7, :92:15, :93:51] wire _slt_T_1 = io_in2_0[63]; // @[ALU.scala:83:7, :92:34] wire _slt_T_5 = io_in2_0[63]; // @[ALU.scala:83:7, :92:34, :93:35] wire _slt_T_2 = _slt_T == _slt_T_1; // @[ALU.scala:92:{15,24,34}] wire _slt_T_3 = io_adder_out[63]; // @[ALU.scala:83:7, :92:56] wire _slt_T_4 = io_fn_0[1]; // @[ALU.scala:61:35, :83:7] wire _slt_T_7 = _slt_T_4 ? _slt_T_5 : _slt_T_6; // @[ALU.scala:61:35, :93:{8,35,51}] wire slt = _slt_T_2 ? _slt_T_3 : _slt_T_7; // @[ALU.scala:92:{8,24,56}, :93:8] wire _io_cmp_out_T = io_fn_0[0]; // @[ALU.scala:62:35, :83:7] wire _rotin_T = io_fn_0[0]; // @[ALU.scala:62:35, :83:7, :156:24] wire _rotout_T = io_fn_0[0]; // @[ALU.scala:62:35, :83:7, :159:25] wire _rotout_T_2 = io_fn_0[0]; // @[ALU.scala:62:35, :83:7, :159:61] wire _io_cmp_out_T_2 = ~_io_cmp_out_T_1; // @[ALU.scala:63:{26,30}] wire _io_cmp_out_T_3 = in1_xor_in2 == 64'h0; // @[ALU.scala:86:28, :94:68] wire _io_cmp_out_T_4 = _io_cmp_out_T_2 ? _io_cmp_out_T_3 : slt; // @[ALU.scala:63:26, :92:8, :94:{41,68}] assign _io_cmp_out_T_5 = _io_cmp_out_T ^ _io_cmp_out_T_4; // @[ALU.scala:62:35, :94:{36,41}] assign io_cmp_out = _io_cmp_out_T_5; // @[ALU.scala:83:7, :94:36] wire _shin_hi_32_T_1 = io_in1_0[31]; // @[ALU.scala:83:7, :101:55] wire _shin_hi_32_T_2 = _shin_hi_32_T & _shin_hi_32_T_1; // @[ALU.scala:58:29, :101:{46,55}] wire [31:0] shin_hi_32 = {32{_shin_hi_32_T_2}}; // @[ALU.scala:101:{28,46}] wire [31:0] _shin_hi_T_1 = io_in1_0[63:32]; // @[ALU.scala:83:7, :102:48] wire [31:0] _tz_in_T_6 = io_in1_0[63:32]; // @[ALU.scala:83:7, :102:48, :132:19] wire [31:0] shin_hi = _shin_hi_T ? 
_shin_hi_T_1 : shin_hi_32; // @[ALU.scala:101:28, :102:{24,31,48}] wire _shamt_T = io_in2_0[5]; // @[ALU.scala:83:7, :103:29] wire _shamt_T_2 = _shamt_T & _shamt_T_1; // @[ALU.scala:103:{29,33,42}] wire [4:0] _shamt_T_3 = io_in2_0[4:0]; // @[ALU.scala:83:7, :103:60] wire [5:0] shamt = {_shamt_T_2, _shamt_T_3}; // @[ALU.scala:103:{22,33,60}] wire [31:0] _tz_in_T_8 = io_in1_0[31:0]; // @[ALU.scala:83:7, :104:34, :132:19] wire [31:0] _tz_in_T_63 = io_in1_0[31:0]; // @[ALU.scala:83:7, :104:34, :133:25] wire [31:0] _tz_in_T_65 = io_in1_0[31:0]; // @[ALU.scala:83:7, :104:34, :134:33] wire [31:0] _popc_in_T_2 = io_in1_0[31:0]; // @[ALU.scala:83:7, :104:34, :137:32] wire [63:0] shin_r = {shin_hi, io_in1_0[31:0]}; // @[ALU.scala:83:7, :102:24, :104:{18,34}] wire _GEN = io_fn_0 == 5'h5; // @[package.scala:16:47] wire _shin_T; // @[package.scala:16:47] assign _shin_T = _GEN; // @[package.scala:16:47] wire _shout_T; // @[ALU.scala:109:25] assign _shout_T = _GEN; // @[package.scala:16:47] wire _GEN_0 = io_fn_0 == 5'hB; // @[package.scala:16:47] wire _shin_T_1; // @[package.scala:16:47] assign _shin_T_1 = _GEN_0; // @[package.scala:16:47] wire _shout_T_1; // @[ALU.scala:109:44] assign _shout_T_1 = _GEN_0; // @[package.scala:16:47] wire _GEN_1 = io_fn_0 == 5'h12; // @[package.scala:16:47] wire _shin_T_2; // @[package.scala:16:47] assign _shin_T_2 = _GEN_1; // @[package.scala:16:47] wire _out_T_16; // @[ALU.scala:161:47] assign _out_T_16 = _GEN_1; // @[package.scala:16:47] wire _GEN_2 = io_fn_0 == 5'h13; // @[package.scala:16:47] wire _shin_T_3; // @[package.scala:16:47] assign _shin_T_3 = _GEN_2; // @[package.scala:16:47] wire _shout_T_3; // @[ALU.scala:109:64] assign _shout_T_3 = _GEN_2; // @[package.scala:16:47] wire _bext_mask_T; // @[ALU.scala:122:52] assign _bext_mask_T = _GEN_2; // @[package.scala:16:47] wire _shin_T_4 = _shin_T | _shin_T_1; // @[package.scala:16:47, :81:59] wire _shin_T_5 = _shin_T_4 | _shin_T_2; // @[package.scala:16:47, :81:59] wire _shin_T_6 = _shin_T_5 | _shin_T_3; // @[package.scala:16:47, :81:59] wire _shin_T_7 = ~_shin_T_6; // @[package.scala:81:59] wire [31:0] _shin_T_10 = shin_r[63:32]; // @[ALU.scala:104:18, :106:46] wire [31:0] _rotin_T_3 = shin_r[63:32]; // @[ALU.scala:104:18, :106:46, :156:44] wire [63:0] _shin_T_11 = {32'h0, _shin_T_10}; // @[ALU.scala:106:46] wire [31:0] _shin_T_12 = shin_r[31:0]; // @[ALU.scala:104:18, :106:46] wire [31:0] _rotin_T_5 = shin_r[31:0]; // @[ALU.scala:104:18, :106:46, :156:44] wire [63:0] _shin_T_13 = {_shin_T_12, 32'h0}; // @[ALU.scala:106:46] wire [63:0] _shin_T_15 = _shin_T_13 & 64'hFFFFFFFF00000000; // @[ALU.scala:106:46] wire [63:0] _shin_T_16 = _shin_T_11 | _shin_T_15; // @[ALU.scala:106:46] wire [47:0] _shin_T_20 = _shin_T_16[63:16]; // @[ALU.scala:106:46] wire [63:0] _shin_T_21 = {16'h0, _shin_T_20 & 48'hFFFF0000FFFF}; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [47:0] _shin_T_22 = _shin_T_16[47:0]; // @[ALU.scala:106:46] wire [63:0] _shin_T_23 = {_shin_T_22, 16'h0}; // @[ALU.scala:106:46] wire [63:0] _shin_T_25 = _shin_T_23 & 64'hFFFF0000FFFF0000; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shin_T_26 = _shin_T_21 | _shin_T_25; // @[ALU.scala:106:46] wire [55:0] _shin_T_30 = _shin_T_26[63:8]; // @[ALU.scala:106:46] wire [63:0] _shin_T_31 = {8'h0, _shin_T_30 & 56'hFF00FF00FF00FF}; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [55:0] _shin_T_32 = _shin_T_26[55:0]; // @[ALU.scala:106:46] wire [63:0] _shin_T_33 = {_shin_T_32, 8'h0}; // @[ALU.scala:106:46] wire [63:0] 
_shin_T_35 = _shin_T_33 & 64'hFF00FF00FF00FF00; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shin_T_36 = _shin_T_31 | _shin_T_35; // @[ALU.scala:106:46] wire [59:0] _shin_T_40 = _shin_T_36[63:4]; // @[ALU.scala:106:46] wire [63:0] _shin_T_41 = {4'h0, _shin_T_40 & 60'hF0F0F0F0F0F0F0F}; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [59:0] _shin_T_42 = _shin_T_36[59:0]; // @[ALU.scala:106:46] wire [63:0] _shin_T_43 = {_shin_T_42, 4'h0}; // @[ALU.scala:106:46] wire [63:0] _shin_T_45 = _shin_T_43 & 64'hF0F0F0F0F0F0F0F0; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shin_T_46 = _shin_T_41 | _shin_T_45; // @[ALU.scala:106:46] wire [61:0] _shin_T_50 = _shin_T_46[63:2]; // @[ALU.scala:106:46] wire [63:0] _shin_T_51 = {2'h0, _shin_T_50 & 62'h3333333333333333}; // @[package.scala:16:47] wire [61:0] _shin_T_52 = _shin_T_46[61:0]; // @[ALU.scala:106:46] wire [63:0] _shin_T_53 = {_shin_T_52, 2'h0}; // @[package.scala:16:47] wire [63:0] _shin_T_55 = _shin_T_53 & 64'hCCCCCCCCCCCCCCCC; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shin_T_56 = _shin_T_51 | _shin_T_55; // @[ALU.scala:106:46] wire [62:0] _shin_T_60 = _shin_T_56[63:1]; // @[ALU.scala:106:46] wire [63:0] _shin_T_61 = {1'h0, _shin_T_60 & 63'h5555555555555555}; // @[ALU.scala:88:26, :106:46, :108:24, :132:19, :156:44, :158:25] wire [62:0] _shin_T_62 = _shin_T_56[62:0]; // @[ALU.scala:106:46] wire [63:0] _shin_T_63 = {_shin_T_62, 1'h0}; // @[ALU.scala:88:26, :106:46] wire [63:0] _shin_T_65 = _shin_T_63 & 64'hAAAAAAAAAAAAAAAA; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shin_T_66 = _shin_T_61 | _shin_T_65; // @[ALU.scala:106:46] wire [63:0] shin = _shin_T_7 ? _shin_T_66 : shin_r; // @[ALU.scala:64:33, :104:18, :106:{17,46}] wire _shout_r_T_1 = shin[63]; // @[ALU.scala:106:17, :107:41] wire _shout_r_T_2 = _shout_r_T & _shout_r_T_1; // @[ALU.scala:58:29, :107:{35,41}] wire [64:0] _shout_r_T_3 = {_shout_r_T_2, shin}; // @[ALU.scala:106:17, :107:{21,35}] wire [64:0] _shout_r_T_4 = _shout_r_T_3; // @[ALU.scala:107:{21,57}] wire [64:0] _shout_r_T_5 = $signed($signed(_shout_r_T_4) >>> shamt); // @[ALU.scala:103:22, :107:{57,64}] wire [63:0] shout_r = _shout_r_T_5[63:0]; // @[ALU.scala:107:{64,73}] wire [31:0] _shout_l_T_2 = shout_r[63:32]; // @[ALU.scala:107:73, :108:24] wire [63:0] _shout_l_T_3 = {32'h0, _shout_l_T_2}; // @[ALU.scala:108:24] wire [31:0] _shout_l_T_4 = shout_r[31:0]; // @[ALU.scala:107:73, :108:24] wire [63:0] _shout_l_T_5 = {_shout_l_T_4, 32'h0}; // @[ALU.scala:108:24] wire [63:0] _shout_l_T_7 = _shout_l_T_5 & 64'hFFFFFFFF00000000; // @[ALU.scala:108:24] wire [63:0] _shout_l_T_8 = _shout_l_T_3 | _shout_l_T_7; // @[ALU.scala:108:24] wire [47:0] _shout_l_T_12 = _shout_l_T_8[63:16]; // @[ALU.scala:108:24] wire [63:0] _shout_l_T_13 = {16'h0, _shout_l_T_12 & 48'hFFFF0000FFFF}; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [47:0] _shout_l_T_14 = _shout_l_T_8[47:0]; // @[ALU.scala:108:24] wire [63:0] _shout_l_T_15 = {_shout_l_T_14, 16'h0}; // @[ALU.scala:106:46, :108:24] wire [63:0] _shout_l_T_17 = _shout_l_T_15 & 64'hFFFF0000FFFF0000; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shout_l_T_18 = _shout_l_T_13 | _shout_l_T_17; // @[ALU.scala:108:24] wire [55:0] _shout_l_T_22 = _shout_l_T_18[63:8]; // @[ALU.scala:108:24] wire [63:0] _shout_l_T_23 = {8'h0, _shout_l_T_22 & 56'hFF00FF00FF00FF}; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [55:0] _shout_l_T_24 = 
_shout_l_T_18[55:0]; // @[ALU.scala:108:24] wire [63:0] _shout_l_T_25 = {_shout_l_T_24, 8'h0}; // @[ALU.scala:108:24] wire [63:0] _shout_l_T_27 = _shout_l_T_25 & 64'hFF00FF00FF00FF00; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shout_l_T_28 = _shout_l_T_23 | _shout_l_T_27; // @[ALU.scala:108:24] wire [59:0] _shout_l_T_32 = _shout_l_T_28[63:4]; // @[ALU.scala:108:24] wire [63:0] _shout_l_T_33 = {4'h0, _shout_l_T_32 & 60'hF0F0F0F0F0F0F0F}; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [59:0] _shout_l_T_34 = _shout_l_T_28[59:0]; // @[ALU.scala:108:24] wire [63:0] _shout_l_T_35 = {_shout_l_T_34, 4'h0}; // @[ALU.scala:106:46, :108:24] wire [63:0] _shout_l_T_37 = _shout_l_T_35 & 64'hF0F0F0F0F0F0F0F0; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shout_l_T_38 = _shout_l_T_33 | _shout_l_T_37; // @[ALU.scala:108:24] wire [61:0] _shout_l_T_42 = _shout_l_T_38[63:2]; // @[ALU.scala:108:24] wire [63:0] _shout_l_T_43 = {2'h0, _shout_l_T_42 & 62'h3333333333333333}; // @[package.scala:16:47] wire [61:0] _shout_l_T_44 = _shout_l_T_38[61:0]; // @[ALU.scala:108:24] wire [63:0] _shout_l_T_45 = {_shout_l_T_44, 2'h0}; // @[package.scala:16:47] wire [63:0] _shout_l_T_47 = _shout_l_T_45 & 64'hCCCCCCCCCCCCCCCC; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _shout_l_T_48 = _shout_l_T_43 | _shout_l_T_47; // @[ALU.scala:108:24] wire [62:0] _shout_l_T_52 = _shout_l_T_48[63:1]; // @[ALU.scala:108:24] wire [63:0] _shout_l_T_53 = {1'h0, _shout_l_T_52 & 63'h5555555555555555}; // @[ALU.scala:88:26, :106:46, :108:24, :132:19, :156:44, :158:25] wire [62:0] _shout_l_T_54 = _shout_l_T_48[62:0]; // @[ALU.scala:108:24] wire [63:0] _shout_l_T_55 = {_shout_l_T_54, 1'h0}; // @[ALU.scala:88:26, :108:24] wire [63:0] _shout_l_T_57 = _shout_l_T_55 & 64'hAAAAAAAAAAAAAAAA; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] shout_l = _shout_l_T_53 | _shout_l_T_57; // @[ALU.scala:108:24] wire _shout_T_2 = _shout_T | _shout_T_1; // @[ALU.scala:109:{25,35,44}] wire _shout_T_4 = _shout_T_2 | _shout_T_3; // @[ALU.scala:109:{35,55,64}] wire [63:0] _shout_T_5 = _shout_T_4 ? shout_r : 64'h0; // @[ALU.scala:107:73, :109:{18,55}] wire _shout_T_6 = io_fn_0 == 5'h1; // @[ALU.scala:83:7, :110:25] wire [63:0] _shout_T_7 = _shout_T_6 ? shout_l : 64'h0; // @[ALU.scala:108:24, :110:{18,25}] wire [63:0] shout = _shout_T_5 | _shout_T_7; // @[ALU.scala:109:{18,91}, :110:18] wire in2_not_zero = |io_in2_0; // @[ALU.scala:83:7, :113:29] wire _logic_T = io_fn_0 == 5'h4; // @[ALU.scala:83:7, :119:25] wire _GEN_3 = io_fn_0 == 5'h6; // @[ALU.scala:83:7, :119:45] wire _logic_T_1; // @[ALU.scala:119:45] assign _logic_T_1 = _GEN_3; // @[ALU.scala:119:45] wire _logic_T_8; // @[ALU.scala:120:25] assign _logic_T_8 = _GEN_3; // @[ALU.scala:119:45, :120:25] wire _logic_T_2 = _logic_T | _logic_T_1; // @[ALU.scala:119:{25,36,45}] wire _GEN_4 = io_fn_0 == 5'h19; // @[ALU.scala:83:7, :119:64] wire _logic_T_3; // @[ALU.scala:119:64] assign _logic_T_3 = _GEN_4; // @[ALU.scala:119:64] wire _logic_T_11; // @[ALU.scala:120:64] assign _logic_T_11 = _GEN_4; // @[ALU.scala:119:64, :120:64] wire _logic_T_4 = _logic_T_2 | _logic_T_3; // @[ALU.scala:119:{36,55,64}] wire _logic_T_5 = io_fn_0 == 5'h1A; // @[ALU.scala:83:7, :119:84] wire _logic_T_6 = _logic_T_4 | _logic_T_5; // @[ALU.scala:119:{55,75,84}] wire [63:0] _logic_T_7 = _logic_T_6 ? 
in1_xor_in2 : 64'h0; // @[ALU.scala:86:28, :119:{18,75}] wire _logic_T_9 = io_fn_0 == 5'h7; // @[ALU.scala:83:7, :120:44] wire _logic_T_10 = _logic_T_8 | _logic_T_9; // @[ALU.scala:120:{25,35,44}] wire _logic_T_12 = _logic_T_10 | _logic_T_11; // @[ALU.scala:120:{35,55,64}] wire _logic_T_13 = io_fn_0 == 5'h18; // @[ALU.scala:83:7, :120:84] wire _logic_T_14 = _logic_T_12 | _logic_T_13; // @[ALU.scala:120:{55,75,84}] wire [63:0] _logic_T_15 = _logic_T_14 ? in1_and_in2 : 64'h0; // @[ALU.scala:87:28, :120:{18,75}] wire [63:0] logic_0 = _logic_T_7 | _logic_T_15; // @[ALU.scala:119:{18,115}, :120:18] wire _bext_mask_T_1 = _bext_mask_T; // @[ALU.scala:122:{43,52}] wire [63:0] bext_mask = _bext_mask_T_1 ? 64'h1 : 64'hFFFFFFFFFFFFFFFF; // @[ALU.scala:122:{22,43}] wire _shift_logic_T = io_fn_0 > 5'hB; // @[ALU.scala:59:31, :83:7] wire _shift_logic_T_1 = ~(io_fn_0[4]); // @[ALU.scala:59:48, :83:7] wire _shift_logic_T_2 = _shift_logic_T & _shift_logic_T_1; // @[ALU.scala:59:{31,41,48}] wire _shift_logic_T_3 = _shift_logic_T_2 & slt; // @[ALU.scala:59:41, :92:8, :123:36] wire [63:0] _shift_logic_T_4 = {63'h0, _shift_logic_T_3} | logic_0; // @[ALU.scala:119:115, :123:{36,44}] wire [63:0] _shift_logic_T_5 = shout & bext_mask; // @[ALU.scala:109:91, :122:22, :123:61] wire [63:0] shift_logic = _shift_logic_T_4 | _shift_logic_T_5; // @[ALU.scala:123:{44,52,61}] wire _tz_in_T = ~io_dw_0; // @[ALU.scala:83:7, :130:32] wire _tz_in_T_1 = io_in2_0[0]; // @[ALU.scala:83:7, :130:53] wire _tz_in_T_2 = ~_tz_in_T_1; // @[ALU.scala:130:{46,53}] wire [1:0] _tz_in_T_3 = {_tz_in_T, _tz_in_T_2}; // @[ALU.scala:130:{32,43,46}] wire [63:0] _tz_in_T_7 = {32'h0, _tz_in_T_6}; // @[ALU.scala:132:19] wire [63:0] _tz_in_T_9 = {_tz_in_T_8, 32'h0}; // @[ALU.scala:132:19] wire [63:0] _tz_in_T_11 = _tz_in_T_9 & 64'hFFFFFFFF00000000; // @[ALU.scala:132:19] wire [63:0] _tz_in_T_12 = _tz_in_T_7 | _tz_in_T_11; // @[ALU.scala:132:19] wire [47:0] _tz_in_T_16 = _tz_in_T_12[63:16]; // @[ALU.scala:132:19] wire [63:0] _tz_in_T_17 = {16'h0, _tz_in_T_16 & 48'hFFFF0000FFFF}; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [47:0] _tz_in_T_18 = _tz_in_T_12[47:0]; // @[ALU.scala:132:19] wire [63:0] _tz_in_T_19 = {_tz_in_T_18, 16'h0}; // @[ALU.scala:106:46, :132:19] wire [63:0] _tz_in_T_21 = _tz_in_T_19 & 64'hFFFF0000FFFF0000; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _tz_in_T_22 = _tz_in_T_17 | _tz_in_T_21; // @[ALU.scala:132:19] wire [55:0] _tz_in_T_26 = _tz_in_T_22[63:8]; // @[ALU.scala:132:19] wire [63:0] _tz_in_T_27 = {8'h0, _tz_in_T_26 & 56'hFF00FF00FF00FF}; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [55:0] _tz_in_T_28 = _tz_in_T_22[55:0]; // @[ALU.scala:132:19] wire [63:0] _tz_in_T_29 = {_tz_in_T_28, 8'h0}; // @[ALU.scala:132:19] wire [63:0] _tz_in_T_31 = _tz_in_T_29 & 64'hFF00FF00FF00FF00; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _tz_in_T_32 = _tz_in_T_27 | _tz_in_T_31; // @[ALU.scala:132:19] wire [59:0] _tz_in_T_36 = _tz_in_T_32[63:4]; // @[ALU.scala:132:19] wire [63:0] _tz_in_T_37 = {4'h0, _tz_in_T_36 & 60'hF0F0F0F0F0F0F0F}; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [59:0] _tz_in_T_38 = _tz_in_T_32[59:0]; // @[ALU.scala:132:19] wire [63:0] _tz_in_T_39 = {_tz_in_T_38, 4'h0}; // @[ALU.scala:106:46, :132:19] wire [63:0] _tz_in_T_41 = _tz_in_T_39 & 64'hF0F0F0F0F0F0F0F0; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _tz_in_T_42 = _tz_in_T_37 | _tz_in_T_41; // @[ALU.scala:132:19] wire [61:0] _tz_in_T_46 = 
_tz_in_T_42[63:2]; // @[ALU.scala:132:19] wire [63:0] _tz_in_T_47 = {2'h0, _tz_in_T_46 & 62'h3333333333333333}; // @[package.scala:16:47] wire [61:0] _tz_in_T_48 = _tz_in_T_42[61:0]; // @[ALU.scala:132:19] wire [63:0] _tz_in_T_49 = {_tz_in_T_48, 2'h0}; // @[package.scala:16:47] wire [63:0] _tz_in_T_51 = _tz_in_T_49 & 64'hCCCCCCCCCCCCCCCC; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _tz_in_T_52 = _tz_in_T_47 | _tz_in_T_51; // @[ALU.scala:132:19] wire [62:0] _tz_in_T_56 = _tz_in_T_52[63:1]; // @[ALU.scala:132:19] wire [63:0] _tz_in_T_57 = {1'h0, _tz_in_T_56 & 63'h5555555555555555}; // @[ALU.scala:88:26, :106:46, :108:24, :132:19, :156:44, :158:25] wire [62:0] _tz_in_T_58 = _tz_in_T_52[62:0]; // @[ALU.scala:132:19] wire [63:0] _tz_in_T_59 = {_tz_in_T_58, 1'h0}; // @[ALU.scala:88:26, :132:19] wire [63:0] _tz_in_T_61 = _tz_in_T_59 & 64'hAAAAAAAAAAAAAAAA; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _tz_in_T_62 = _tz_in_T_57 | _tz_in_T_61; // @[ALU.scala:132:19] wire [32:0] _tz_in_T_64 = {1'h1, _tz_in_T_63}; // @[ALU.scala:133:{16,25}] wire [15:0] _tz_in_T_68 = _tz_in_T_65[31:16]; // @[ALU.scala:134:{26,33}] wire [31:0] _tz_in_T_69 = {16'h0, _tz_in_T_68}; // @[ALU.scala:106:46, :134:26] wire [15:0] _tz_in_T_70 = _tz_in_T_65[15:0]; // @[ALU.scala:134:{26,33}] wire [31:0] _tz_in_T_71 = {_tz_in_T_70, 16'h0}; // @[ALU.scala:106:46, :134:26] wire [31:0] _tz_in_T_73 = _tz_in_T_71 & 32'hFFFF0000; // @[ALU.scala:134:26] wire [31:0] _tz_in_T_74 = _tz_in_T_69 | _tz_in_T_73; // @[ALU.scala:134:26] wire [23:0] _tz_in_T_78 = _tz_in_T_74[31:8]; // @[ALU.scala:134:26] wire [31:0] _tz_in_T_79 = {8'h0, _tz_in_T_78 & 24'hFF00FF}; // @[ALU.scala:134:26] wire [23:0] _tz_in_T_80 = _tz_in_T_74[23:0]; // @[ALU.scala:134:26] wire [31:0] _tz_in_T_81 = {_tz_in_T_80, 8'h0}; // @[ALU.scala:134:26] wire [31:0] _tz_in_T_83 = _tz_in_T_81 & 32'hFF00FF00; // @[ALU.scala:134:26] wire [31:0] _tz_in_T_84 = _tz_in_T_79 | _tz_in_T_83; // @[ALU.scala:134:26] wire [27:0] _tz_in_T_88 = _tz_in_T_84[31:4]; // @[ALU.scala:134:26] wire [31:0] _tz_in_T_89 = {4'h0, _tz_in_T_88 & 28'hF0F0F0F}; // @[ALU.scala:106:46, :134:26] wire [27:0] _tz_in_T_90 = _tz_in_T_84[27:0]; // @[ALU.scala:134:26] wire [31:0] _tz_in_T_91 = {_tz_in_T_90, 4'h0}; // @[ALU.scala:106:46, :134:26] wire [31:0] _tz_in_T_93 = _tz_in_T_91 & 32'hF0F0F0F0; // @[ALU.scala:134:26] wire [31:0] _tz_in_T_94 = _tz_in_T_89 | _tz_in_T_93; // @[ALU.scala:134:26] wire [29:0] _tz_in_T_98 = _tz_in_T_94[31:2]; // @[ALU.scala:134:26] wire [31:0] _tz_in_T_99 = {2'h0, _tz_in_T_98 & 30'h33333333}; // @[package.scala:16:47] wire [29:0] _tz_in_T_100 = _tz_in_T_94[29:0]; // @[ALU.scala:134:26] wire [31:0] _tz_in_T_101 = {_tz_in_T_100, 2'h0}; // @[package.scala:16:47] wire [31:0] _tz_in_T_103 = _tz_in_T_101 & 32'hCCCCCCCC; // @[ALU.scala:134:26] wire [31:0] _tz_in_T_104 = _tz_in_T_99 | _tz_in_T_103; // @[ALU.scala:134:26] wire [30:0] _tz_in_T_108 = _tz_in_T_104[31:1]; // @[ALU.scala:134:26] wire [31:0] _tz_in_T_109 = {1'h0, _tz_in_T_108 & 31'h55555555}; // @[ALU.scala:88:26, :134:26] wire [30:0] _tz_in_T_110 = _tz_in_T_104[30:0]; // @[ALU.scala:134:26] wire [31:0] _tz_in_T_111 = {_tz_in_T_110, 1'h0}; // @[ALU.scala:88:26, :134:26] wire [31:0] _tz_in_T_113 = _tz_in_T_111 & 32'hAAAAAAAA; // @[ALU.scala:134:26] wire [31:0] _tz_in_T_114 = _tz_in_T_109 | _tz_in_T_113; // @[ALU.scala:134:26] wire [32:0] _tz_in_T_115 = {1'h1, _tz_in_T_114}; // @[ALU.scala:134:{16,26}] wire _tz_in_T_116 = _tz_in_T_3 == 2'h1; // @[ALU.scala:130:{43,62}] wire [63:0] 
_tz_in_T_117 = _tz_in_T_116 ? _tz_in_T_62 : io_in1_0; // @[ALU.scala:83:7, :130:62, :132:19] wire _tz_in_T_118 = _tz_in_T_3 == 2'h2; // @[ALU.scala:130:{43,62}] wire [63:0] _tz_in_T_119 = _tz_in_T_118 ? {31'h0, _tz_in_T_64} : _tz_in_T_117; // @[ALU.scala:130:62, :133:16] wire _tz_in_T_120 = &_tz_in_T_3; // @[ALU.scala:130:{43,62}] wire [63:0] tz_in = _tz_in_T_120 ? {31'h0, _tz_in_T_115} : _tz_in_T_119; // @[ALU.scala:130:62, :134:16] wire _popc_in_T = io_in2_0[1]; // @[ALU.scala:83:7, :136:27] wire _popc_in_T_1 = ~io_dw_0; // @[ALU.scala:83:7, :130:32, :137:15] wire [63:0] _popc_in_T_3 = _popc_in_T_1 ? {32'h0, _popc_in_T_2} : io_in1_0; // @[ALU.scala:83:7, :137:{8,15,32}] wire [64:0] _popc_in_T_4 = {1'h1, tz_in}; // @[ALU.scala:130:62, :138:27] wire _popc_in_T_5 = _popc_in_T_4[0]; // @[OneHot.scala:85:71] wire _popc_in_T_6 = _popc_in_T_4[1]; // @[OneHot.scala:85:71] wire _popc_in_T_7 = _popc_in_T_4[2]; // @[OneHot.scala:85:71] wire _popc_in_T_8 = _popc_in_T_4[3]; // @[OneHot.scala:85:71] wire _popc_in_T_9 = _popc_in_T_4[4]; // @[OneHot.scala:85:71] wire _popc_in_T_10 = _popc_in_T_4[5]; // @[OneHot.scala:85:71] wire _popc_in_T_11 = _popc_in_T_4[6]; // @[OneHot.scala:85:71] wire _popc_in_T_12 = _popc_in_T_4[7]; // @[OneHot.scala:85:71] wire _popc_in_T_13 = _popc_in_T_4[8]; // @[OneHot.scala:85:71] wire _popc_in_T_14 = _popc_in_T_4[9]; // @[OneHot.scala:85:71] wire _popc_in_T_15 = _popc_in_T_4[10]; // @[OneHot.scala:85:71] wire _popc_in_T_16 = _popc_in_T_4[11]; // @[OneHot.scala:85:71] wire _popc_in_T_17 = _popc_in_T_4[12]; // @[OneHot.scala:85:71] wire _popc_in_T_18 = _popc_in_T_4[13]; // @[OneHot.scala:85:71] wire _popc_in_T_19 = _popc_in_T_4[14]; // @[OneHot.scala:85:71] wire _popc_in_T_20 = _popc_in_T_4[15]; // @[OneHot.scala:85:71] wire _popc_in_T_21 = _popc_in_T_4[16]; // @[OneHot.scala:85:71] wire _popc_in_T_22 = _popc_in_T_4[17]; // @[OneHot.scala:85:71] wire _popc_in_T_23 = _popc_in_T_4[18]; // @[OneHot.scala:85:71] wire _popc_in_T_24 = _popc_in_T_4[19]; // @[OneHot.scala:85:71] wire _popc_in_T_25 = _popc_in_T_4[20]; // @[OneHot.scala:85:71] wire _popc_in_T_26 = _popc_in_T_4[21]; // @[OneHot.scala:85:71] wire _popc_in_T_27 = _popc_in_T_4[22]; // @[OneHot.scala:85:71] wire _popc_in_T_28 = _popc_in_T_4[23]; // @[OneHot.scala:85:71] wire _popc_in_T_29 = _popc_in_T_4[24]; // @[OneHot.scala:85:71] wire _popc_in_T_30 = _popc_in_T_4[25]; // @[OneHot.scala:85:71] wire _popc_in_T_31 = _popc_in_T_4[26]; // @[OneHot.scala:85:71] wire _popc_in_T_32 = _popc_in_T_4[27]; // @[OneHot.scala:85:71] wire _popc_in_T_33 = _popc_in_T_4[28]; // @[OneHot.scala:85:71] wire _popc_in_T_34 = _popc_in_T_4[29]; // @[OneHot.scala:85:71] wire _popc_in_T_35 = _popc_in_T_4[30]; // @[OneHot.scala:85:71] wire _popc_in_T_36 = _popc_in_T_4[31]; // @[OneHot.scala:85:71] wire _popc_in_T_37 = _popc_in_T_4[32]; // @[OneHot.scala:85:71] wire _popc_in_T_38 = _popc_in_T_4[33]; // @[OneHot.scala:85:71] wire _popc_in_T_39 = _popc_in_T_4[34]; // @[OneHot.scala:85:71] wire _popc_in_T_40 = _popc_in_T_4[35]; // @[OneHot.scala:85:71] wire _popc_in_T_41 = _popc_in_T_4[36]; // @[OneHot.scala:85:71] wire _popc_in_T_42 = _popc_in_T_4[37]; // @[OneHot.scala:85:71] wire _popc_in_T_43 = _popc_in_T_4[38]; // @[OneHot.scala:85:71] wire _popc_in_T_44 = _popc_in_T_4[39]; // @[OneHot.scala:85:71] wire _popc_in_T_45 = _popc_in_T_4[40]; // @[OneHot.scala:85:71] wire _popc_in_T_46 = _popc_in_T_4[41]; // @[OneHot.scala:85:71] wire _popc_in_T_47 = _popc_in_T_4[42]; // @[OneHot.scala:85:71] wire _popc_in_T_48 = _popc_in_T_4[43]; // 
@[OneHot.scala:85:71] wire _popc_in_T_49 = _popc_in_T_4[44]; // @[OneHot.scala:85:71] wire _popc_in_T_50 = _popc_in_T_4[45]; // @[OneHot.scala:85:71] wire _popc_in_T_51 = _popc_in_T_4[46]; // @[OneHot.scala:85:71] wire _popc_in_T_52 = _popc_in_T_4[47]; // @[OneHot.scala:85:71] wire _popc_in_T_53 = _popc_in_T_4[48]; // @[OneHot.scala:85:71] wire _popc_in_T_54 = _popc_in_T_4[49]; // @[OneHot.scala:85:71] wire _popc_in_T_55 = _popc_in_T_4[50]; // @[OneHot.scala:85:71] wire _popc_in_T_56 = _popc_in_T_4[51]; // @[OneHot.scala:85:71] wire _popc_in_T_57 = _popc_in_T_4[52]; // @[OneHot.scala:85:71] wire _popc_in_T_58 = _popc_in_T_4[53]; // @[OneHot.scala:85:71] wire _popc_in_T_59 = _popc_in_T_4[54]; // @[OneHot.scala:85:71] wire _popc_in_T_60 = _popc_in_T_4[55]; // @[OneHot.scala:85:71] wire _popc_in_T_61 = _popc_in_T_4[56]; // @[OneHot.scala:85:71] wire _popc_in_T_62 = _popc_in_T_4[57]; // @[OneHot.scala:85:71] wire _popc_in_T_63 = _popc_in_T_4[58]; // @[OneHot.scala:85:71] wire _popc_in_T_64 = _popc_in_T_4[59]; // @[OneHot.scala:85:71] wire _popc_in_T_65 = _popc_in_T_4[60]; // @[OneHot.scala:85:71] wire _popc_in_T_66 = _popc_in_T_4[61]; // @[OneHot.scala:85:71] wire _popc_in_T_67 = _popc_in_T_4[62]; // @[OneHot.scala:85:71] wire _popc_in_T_68 = _popc_in_T_4[63]; // @[OneHot.scala:85:71] wire _popc_in_T_69 = _popc_in_T_4[64]; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_70 = {_popc_in_T_69, 64'h0}; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_71 = _popc_in_T_68 ? 65'h8000000000000000 : _popc_in_T_70; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_72 = _popc_in_T_67 ? 65'h4000000000000000 : _popc_in_T_71; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_73 = _popc_in_T_66 ? 65'h2000000000000000 : _popc_in_T_72; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_74 = _popc_in_T_65 ? 65'h1000000000000000 : _popc_in_T_73; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_75 = _popc_in_T_64 ? 65'h800000000000000 : _popc_in_T_74; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_76 = _popc_in_T_63 ? 65'h400000000000000 : _popc_in_T_75; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_77 = _popc_in_T_62 ? 65'h200000000000000 : _popc_in_T_76; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_78 = _popc_in_T_61 ? 65'h100000000000000 : _popc_in_T_77; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_79 = _popc_in_T_60 ? 65'h80000000000000 : _popc_in_T_78; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_80 = _popc_in_T_59 ? 65'h40000000000000 : _popc_in_T_79; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_81 = _popc_in_T_58 ? 65'h20000000000000 : _popc_in_T_80; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_82 = _popc_in_T_57 ? 65'h10000000000000 : _popc_in_T_81; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_83 = _popc_in_T_56 ? 65'h8000000000000 : _popc_in_T_82; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_84 = _popc_in_T_55 ? 65'h4000000000000 : _popc_in_T_83; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_85 = _popc_in_T_54 ? 65'h2000000000000 : _popc_in_T_84; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_86 = _popc_in_T_53 ? 65'h1000000000000 : _popc_in_T_85; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_87 = _popc_in_T_52 ? 65'h800000000000 : _popc_in_T_86; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_88 = _popc_in_T_51 ? 65'h400000000000 : _popc_in_T_87; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_89 = _popc_in_T_50 ? 65'h200000000000 : _popc_in_T_88; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_90 = _popc_in_T_49 ? 
65'h100000000000 : _popc_in_T_89; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_91 = _popc_in_T_48 ? 65'h80000000000 : _popc_in_T_90; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_92 = _popc_in_T_47 ? 65'h40000000000 : _popc_in_T_91; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_93 = _popc_in_T_46 ? 65'h20000000000 : _popc_in_T_92; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_94 = _popc_in_T_45 ? 65'h10000000000 : _popc_in_T_93; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_95 = _popc_in_T_44 ? 65'h8000000000 : _popc_in_T_94; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_96 = _popc_in_T_43 ? 65'h4000000000 : _popc_in_T_95; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_97 = _popc_in_T_42 ? 65'h2000000000 : _popc_in_T_96; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_98 = _popc_in_T_41 ? 65'h1000000000 : _popc_in_T_97; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_99 = _popc_in_T_40 ? 65'h800000000 : _popc_in_T_98; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_100 = _popc_in_T_39 ? 65'h400000000 : _popc_in_T_99; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_101 = _popc_in_T_38 ? 65'h200000000 : _popc_in_T_100; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_102 = _popc_in_T_37 ? 65'h100000000 : _popc_in_T_101; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_103 = _popc_in_T_36 ? 65'h80000000 : _popc_in_T_102; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_104 = _popc_in_T_35 ? 65'h40000000 : _popc_in_T_103; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_105 = _popc_in_T_34 ? 65'h20000000 : _popc_in_T_104; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_106 = _popc_in_T_33 ? 65'h10000000 : _popc_in_T_105; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_107 = _popc_in_T_32 ? 65'h8000000 : _popc_in_T_106; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_108 = _popc_in_T_31 ? 65'h4000000 : _popc_in_T_107; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_109 = _popc_in_T_30 ? 65'h2000000 : _popc_in_T_108; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_110 = _popc_in_T_29 ? 65'h1000000 : _popc_in_T_109; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_111 = _popc_in_T_28 ? 65'h800000 : _popc_in_T_110; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_112 = _popc_in_T_27 ? 65'h400000 : _popc_in_T_111; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_113 = _popc_in_T_26 ? 65'h200000 : _popc_in_T_112; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_114 = _popc_in_T_25 ? 65'h100000 : _popc_in_T_113; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_115 = _popc_in_T_24 ? 65'h80000 : _popc_in_T_114; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_116 = _popc_in_T_23 ? 65'h40000 : _popc_in_T_115; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_117 = _popc_in_T_22 ? 65'h20000 : _popc_in_T_116; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_118 = _popc_in_T_21 ? 65'h10000 : _popc_in_T_117; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_119 = _popc_in_T_20 ? 65'h8000 : _popc_in_T_118; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_120 = _popc_in_T_19 ? 65'h4000 : _popc_in_T_119; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_121 = _popc_in_T_18 ? 65'h2000 : _popc_in_T_120; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_122 = _popc_in_T_17 ? 65'h1000 : _popc_in_T_121; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_123 = _popc_in_T_16 ? 65'h800 : _popc_in_T_122; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_124 = _popc_in_T_15 ? 65'h400 : _popc_in_T_123; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_125 = _popc_in_T_14 ? 
65'h200 : _popc_in_T_124; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_126 = _popc_in_T_13 ? 65'h100 : _popc_in_T_125; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_127 = _popc_in_T_12 ? 65'h80 : _popc_in_T_126; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_128 = _popc_in_T_11 ? 65'h40 : _popc_in_T_127; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_129 = _popc_in_T_10 ? 65'h20 : _popc_in_T_128; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_130 = _popc_in_T_9 ? 65'h10 : _popc_in_T_129; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_131 = _popc_in_T_8 ? 65'h8 : _popc_in_T_130; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_132 = _popc_in_T_7 ? 65'h4 : _popc_in_T_131; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_133 = _popc_in_T_6 ? 65'h2 : _popc_in_T_132; // @[OneHot.scala:85:71] wire [64:0] _popc_in_T_134 = _popc_in_T_5 ? 65'h1 : _popc_in_T_133; // @[OneHot.scala:85:71] wire [65:0] _popc_in_T_135 = {1'h0, _popc_in_T_134} - 66'h1; // @[Mux.scala:50:70] wire [64:0] _popc_in_T_136 = _popc_in_T_135[64:0]; // @[ALU.scala:138:37] wire [64:0] _popc_in_T_137 = _popc_in_T ? {1'h0, _popc_in_T_3} : _popc_in_T_136; // @[ALU.scala:88:26, :136:{20,27}, :137:8, :138:37] wire [63:0] popc_in = _popc_in_T_137[63:0]; // @[ALU.scala:136:20, :138:43] wire _count_T = popc_in[0]; // @[ALU.scala:138:43, :139:23] wire _count_T_1 = popc_in[1]; // @[ALU.scala:138:43, :139:23] wire _count_T_2 = popc_in[2]; // @[ALU.scala:138:43, :139:23] wire _count_T_3 = popc_in[3]; // @[ALU.scala:138:43, :139:23] wire _count_T_4 = popc_in[4]; // @[ALU.scala:138:43, :139:23] wire _count_T_5 = popc_in[5]; // @[ALU.scala:138:43, :139:23] wire _count_T_6 = popc_in[6]; // @[ALU.scala:138:43, :139:23] wire _count_T_7 = popc_in[7]; // @[ALU.scala:138:43, :139:23] wire _count_T_8 = popc_in[8]; // @[ALU.scala:138:43, :139:23] wire _count_T_9 = popc_in[9]; // @[ALU.scala:138:43, :139:23] wire _count_T_10 = popc_in[10]; // @[ALU.scala:138:43, :139:23] wire _count_T_11 = popc_in[11]; // @[ALU.scala:138:43, :139:23] wire _count_T_12 = popc_in[12]; // @[ALU.scala:138:43, :139:23] wire _count_T_13 = popc_in[13]; // @[ALU.scala:138:43, :139:23] wire _count_T_14 = popc_in[14]; // @[ALU.scala:138:43, :139:23] wire _count_T_15 = popc_in[15]; // @[ALU.scala:138:43, :139:23] wire _count_T_16 = popc_in[16]; // @[ALU.scala:138:43, :139:23] wire _count_T_17 = popc_in[17]; // @[ALU.scala:138:43, :139:23] wire _count_T_18 = popc_in[18]; // @[ALU.scala:138:43, :139:23] wire _count_T_19 = popc_in[19]; // @[ALU.scala:138:43, :139:23] wire _count_T_20 = popc_in[20]; // @[ALU.scala:138:43, :139:23] wire _count_T_21 = popc_in[21]; // @[ALU.scala:138:43, :139:23] wire _count_T_22 = popc_in[22]; // @[ALU.scala:138:43, :139:23] wire _count_T_23 = popc_in[23]; // @[ALU.scala:138:43, :139:23] wire _count_T_24 = popc_in[24]; // @[ALU.scala:138:43, :139:23] wire _count_T_25 = popc_in[25]; // @[ALU.scala:138:43, :139:23] wire _count_T_26 = popc_in[26]; // @[ALU.scala:138:43, :139:23] wire _count_T_27 = popc_in[27]; // @[ALU.scala:138:43, :139:23] wire _count_T_28 = popc_in[28]; // @[ALU.scala:138:43, :139:23] wire _count_T_29 = popc_in[29]; // @[ALU.scala:138:43, :139:23] wire _count_T_30 = popc_in[30]; // @[ALU.scala:138:43, :139:23] wire _count_T_31 = popc_in[31]; // @[ALU.scala:138:43, :139:23] wire _count_T_32 = popc_in[32]; // @[ALU.scala:138:43, :139:23] wire _count_T_33 = popc_in[33]; // @[ALU.scala:138:43, :139:23] wire _count_T_34 = popc_in[34]; // @[ALU.scala:138:43, :139:23] wire _count_T_35 = popc_in[35]; // @[ALU.scala:138:43, :139:23] wire 
_count_T_36 = popc_in[36]; // @[ALU.scala:138:43, :139:23] wire _count_T_37 = popc_in[37]; // @[ALU.scala:138:43, :139:23] wire _count_T_38 = popc_in[38]; // @[ALU.scala:138:43, :139:23] wire _count_T_39 = popc_in[39]; // @[ALU.scala:138:43, :139:23] wire _count_T_40 = popc_in[40]; // @[ALU.scala:138:43, :139:23] wire _count_T_41 = popc_in[41]; // @[ALU.scala:138:43, :139:23] wire _count_T_42 = popc_in[42]; // @[ALU.scala:138:43, :139:23] wire _count_T_43 = popc_in[43]; // @[ALU.scala:138:43, :139:23] wire _count_T_44 = popc_in[44]; // @[ALU.scala:138:43, :139:23] wire _count_T_45 = popc_in[45]; // @[ALU.scala:138:43, :139:23] wire _count_T_46 = popc_in[46]; // @[ALU.scala:138:43, :139:23] wire _count_T_47 = popc_in[47]; // @[ALU.scala:138:43, :139:23] wire _count_T_48 = popc_in[48]; // @[ALU.scala:138:43, :139:23] wire _count_T_49 = popc_in[49]; // @[ALU.scala:138:43, :139:23] wire _count_T_50 = popc_in[50]; // @[ALU.scala:138:43, :139:23] wire _count_T_51 = popc_in[51]; // @[ALU.scala:138:43, :139:23] wire _count_T_52 = popc_in[52]; // @[ALU.scala:138:43, :139:23] wire _count_T_53 = popc_in[53]; // @[ALU.scala:138:43, :139:23] wire _count_T_54 = popc_in[54]; // @[ALU.scala:138:43, :139:23] wire _count_T_55 = popc_in[55]; // @[ALU.scala:138:43, :139:23] wire _count_T_56 = popc_in[56]; // @[ALU.scala:138:43, :139:23] wire _count_T_57 = popc_in[57]; // @[ALU.scala:138:43, :139:23] wire _count_T_58 = popc_in[58]; // @[ALU.scala:138:43, :139:23] wire _count_T_59 = popc_in[59]; // @[ALU.scala:138:43, :139:23] wire _count_T_60 = popc_in[60]; // @[ALU.scala:138:43, :139:23] wire _count_T_61 = popc_in[61]; // @[ALU.scala:138:43, :139:23] wire _count_T_62 = popc_in[62]; // @[ALU.scala:138:43, :139:23] wire _count_T_63 = popc_in[63]; // @[ALU.scala:138:43, :139:23] wire [1:0] _count_T_64 = {1'h0, _count_T} + {1'h0, _count_T_1}; // @[ALU.scala:88:26, :139:23] wire [1:0] _count_T_65 = _count_T_64; // @[ALU.scala:139:23] wire [1:0] _count_T_66 = {1'h0, _count_T_2} + {1'h0, _count_T_3}; // @[ALU.scala:88:26, :139:23] wire [1:0] _count_T_67 = _count_T_66; // @[ALU.scala:139:23] wire [2:0] _count_T_68 = {1'h0, _count_T_65} + {1'h0, _count_T_67}; // @[ALU.scala:88:26, :139:23] wire [2:0] _count_T_69 = _count_T_68; // @[ALU.scala:139:23] wire [1:0] _count_T_70 = {1'h0, _count_T_4} + {1'h0, _count_T_5}; // @[ALU.scala:88:26, :139:23] wire [1:0] _count_T_71 = _count_T_70; // @[ALU.scala:139:23] wire [1:0] _count_T_72 = {1'h0, _count_T_6} + {1'h0, _count_T_7}; // @[ALU.scala:88:26, :139:23] wire [1:0] _count_T_73 = _count_T_72; // @[ALU.scala:139:23] wire [2:0] _count_T_74 = {1'h0, _count_T_71} + {1'h0, _count_T_73}; // @[ALU.scala:88:26, :139:23] wire [2:0] _count_T_75 = _count_T_74; // @[ALU.scala:139:23] wire [3:0] _count_T_76 = {1'h0, _count_T_69} + {1'h0, _count_T_75}; // @[ALU.scala:88:26, :139:23] wire [3:0] _count_T_77 = _count_T_76; // @[ALU.scala:139:23] wire [1:0] _count_T_78 = {1'h0, _count_T_8} + {1'h0, _count_T_9}; // @[ALU.scala:88:26, :139:23] wire [1:0] _count_T_79 = _count_T_78; // @[ALU.scala:139:23] wire [1:0] _count_T_80 = {1'h0, _count_T_10} + {1'h0, _count_T_11}; // @[ALU.scala:88:26, :139:23] wire [1:0] _count_T_81 = _count_T_80; // @[ALU.scala:139:23] wire [2:0] _count_T_82 = {1'h0, _count_T_79} + {1'h0, _count_T_81}; // @[ALU.scala:88:26, :139:23] wire [2:0] _count_T_83 = _count_T_82; // @[ALU.scala:139:23] wire [1:0] _count_T_84 = {1'h0, _count_T_12} + {1'h0, _count_T_13}; // @[ALU.scala:88:26, :139:23] wire [1:0] _count_T_85 = _count_T_84; // @[ALU.scala:139:23] wire [1:0] 
_count_T_86 = {1'h0, _count_T_14} + {1'h0, _count_T_15}; // @[ALU.scala:88:26, :139:23] wire [1:0] _count_T_87 = _count_T_86; // @[ALU.scala:139:23] wire [2:0] _count_T_88 = {1'h0, _count_T_85} + {1'h0, _count_T_87}; // @[ALU.scala:88:26, :139:23] wire [2:0] _count_T_89 = _count_T_88; // @[ALU.scala:139:23] wire [3:0] _count_T_90 = {1'h0, _count_T_83} + {1'h0, _count_T_89}; // @[ALU.scala:88:26, :139:23] wire [3:0] _count_T_91 = _count_T_90; // @[ALU.scala:139:23] wire [4:0] _count_T_92 = {1'h0, _count_T_77} + {1'h0, _count_T_91}; // @[ALU.scala:88:26, :139:23] wire [4:0] _count_T_93 = _count_T_92; // @[ALU.scala:139:23] wire [1:0] _count_T_94 = {1'h0, _count_T_16} + {1'h0, _count_T_17}; // @[ALU.scala:88:26, :139:23] wire [1:0] _count_T_95 = _count_T_94; // @[ALU.scala:139:23] wire [1:0] _count_T_96 = {1'h0, _count_T_18} + {1'h0, _count_T_19}; // @[ALU.scala:88:26, :139:23] wire [1:0] _count_T_97 = _count_T_96; // @[ALU.scala:139:23] wire [2:0] _count_T_98 = {1'h0, _count_T_95} + {1'h0, _count_T_97}; // @[ALU.scala:88:26, :139:23] wire [2:0] _count_T_99 = _count_T_98; // @[ALU.scala:139:23] wire [1:0] _count_T_100 = {1'h0, _count_T_20} + {1'h0, _count_T_21}; // @[ALU.scala:88:26, :139:23] wire [1:0] _count_T_101 = _count_T_100; // @[ALU.scala:139:23] wire [1:0] _count_T_102 = {1'h0, _count_T_22} + {1'h0, _count_T_23}; // @[ALU.scala:88:26, :139:23] wire [1:0] _count_T_103 = _count_T_102; // @[ALU.scala:139:23] wire [2:0] _count_T_104 = {1'h0, _count_T_101} + {1'h0, _count_T_103}; // @[ALU.scala:88:26, :139:23] wire [2:0] _count_T_105 = _count_T_104; // @[ALU.scala:139:23] wire [3:0] _count_T_106 = {1'h0, _count_T_99} + {1'h0, _count_T_105}; // @[ALU.scala:88:26, :139:23] wire [3:0] _count_T_107 = _count_T_106; // @[ALU.scala:139:23] wire [1:0] _count_T_108 = {1'h0, _count_T_24} + {1'h0, _count_T_25}; // @[ALU.scala:88:26, :139:23] wire [1:0] _count_T_109 = _count_T_108; // @[ALU.scala:139:23] wire [1:0] _count_T_110 = {1'h0, _count_T_26} + {1'h0, _count_T_27}; // @[ALU.scala:88:26, :139:23] wire [1:0] _count_T_111 = _count_T_110; // @[ALU.scala:139:23] wire [2:0] _count_T_112 = {1'h0, _count_T_109} + {1'h0, _count_T_111}; // @[ALU.scala:88:26, :139:23] wire [2:0] _count_T_113 = _count_T_112; // @[ALU.scala:139:23] wire [1:0] _count_T_114 = {1'h0, _count_T_28} + {1'h0, _count_T_29}; // @[ALU.scala:88:26, :139:23] wire [1:0] _count_T_115 = _count_T_114; // @[ALU.scala:139:23] wire [1:0] _count_T_116 = {1'h0, _count_T_30} + {1'h0, _count_T_31}; // @[ALU.scala:88:26, :139:23] wire [1:0] _count_T_117 = _count_T_116; // @[ALU.scala:139:23] wire [2:0] _count_T_118 = {1'h0, _count_T_115} + {1'h0, _count_T_117}; // @[ALU.scala:88:26, :139:23] wire [2:0] _count_T_119 = _count_T_118; // @[ALU.scala:139:23] wire [3:0] _count_T_120 = {1'h0, _count_T_113} + {1'h0, _count_T_119}; // @[ALU.scala:88:26, :139:23] wire [3:0] _count_T_121 = _count_T_120; // @[ALU.scala:139:23] wire [4:0] _count_T_122 = {1'h0, _count_T_107} + {1'h0, _count_T_121}; // @[ALU.scala:88:26, :139:23] wire [4:0] _count_T_123 = _count_T_122; // @[ALU.scala:139:23] wire [5:0] _count_T_124 = {1'h0, _count_T_93} + {1'h0, _count_T_123}; // @[ALU.scala:88:26, :139:23] wire [5:0] _count_T_125 = _count_T_124; // @[ALU.scala:139:23] wire [1:0] _count_T_126 = {1'h0, _count_T_32} + {1'h0, _count_T_33}; // @[ALU.scala:88:26, :139:23] wire [1:0] _count_T_127 = _count_T_126; // @[ALU.scala:139:23] wire [1:0] _count_T_128 = {1'h0, _count_T_34} + {1'h0, _count_T_35}; // @[ALU.scala:88:26, :139:23] wire [1:0] _count_T_129 = _count_T_128; // 
@[ALU.scala:139:23] wire [2:0] _count_T_130 = {1'h0, _count_T_127} + {1'h0, _count_T_129}; // @[ALU.scala:88:26, :139:23] wire [2:0] _count_T_131 = _count_T_130; // @[ALU.scala:139:23] wire [1:0] _count_T_132 = {1'h0, _count_T_36} + {1'h0, _count_T_37}; // @[ALU.scala:88:26, :139:23] wire [1:0] _count_T_133 = _count_T_132; // @[ALU.scala:139:23] wire [1:0] _count_T_134 = {1'h0, _count_T_38} + {1'h0, _count_T_39}; // @[ALU.scala:88:26, :139:23] wire [1:0] _count_T_135 = _count_T_134; // @[ALU.scala:139:23] wire [2:0] _count_T_136 = {1'h0, _count_T_133} + {1'h0, _count_T_135}; // @[ALU.scala:88:26, :139:23] wire [2:0] _count_T_137 = _count_T_136; // @[ALU.scala:139:23] wire [3:0] _count_T_138 = {1'h0, _count_T_131} + {1'h0, _count_T_137}; // @[ALU.scala:88:26, :139:23] wire [3:0] _count_T_139 = _count_T_138; // @[ALU.scala:139:23] wire [1:0] _count_T_140 = {1'h0, _count_T_40} + {1'h0, _count_T_41}; // @[ALU.scala:88:26, :139:23] wire [1:0] _count_T_141 = _count_T_140; // @[ALU.scala:139:23] wire [1:0] _count_T_142 = {1'h0, _count_T_42} + {1'h0, _count_T_43}; // @[ALU.scala:88:26, :139:23] wire [1:0] _count_T_143 = _count_T_142; // @[ALU.scala:139:23] wire [2:0] _count_T_144 = {1'h0, _count_T_141} + {1'h0, _count_T_143}; // @[ALU.scala:88:26, :139:23] wire [2:0] _count_T_145 = _count_T_144; // @[ALU.scala:139:23] wire [1:0] _count_T_146 = {1'h0, _count_T_44} + {1'h0, _count_T_45}; // @[ALU.scala:88:26, :139:23] wire [1:0] _count_T_147 = _count_T_146; // @[ALU.scala:139:23] wire [1:0] _count_T_148 = {1'h0, _count_T_46} + {1'h0, _count_T_47}; // @[ALU.scala:88:26, :139:23] wire [1:0] _count_T_149 = _count_T_148; // @[ALU.scala:139:23] wire [2:0] _count_T_150 = {1'h0, _count_T_147} + {1'h0, _count_T_149}; // @[ALU.scala:88:26, :139:23] wire [2:0] _count_T_151 = _count_T_150; // @[ALU.scala:139:23] wire [3:0] _count_T_152 = {1'h0, _count_T_145} + {1'h0, _count_T_151}; // @[ALU.scala:88:26, :139:23] wire [3:0] _count_T_153 = _count_T_152; // @[ALU.scala:139:23] wire [4:0] _count_T_154 = {1'h0, _count_T_139} + {1'h0, _count_T_153}; // @[ALU.scala:88:26, :139:23] wire [4:0] _count_T_155 = _count_T_154; // @[ALU.scala:139:23] wire [1:0] _count_T_156 = {1'h0, _count_T_48} + {1'h0, _count_T_49}; // @[ALU.scala:88:26, :139:23] wire [1:0] _count_T_157 = _count_T_156; // @[ALU.scala:139:23] wire [1:0] _count_T_158 = {1'h0, _count_T_50} + {1'h0, _count_T_51}; // @[ALU.scala:88:26, :139:23] wire [1:0] _count_T_159 = _count_T_158; // @[ALU.scala:139:23] wire [2:0] _count_T_160 = {1'h0, _count_T_157} + {1'h0, _count_T_159}; // @[ALU.scala:88:26, :139:23] wire [2:0] _count_T_161 = _count_T_160; // @[ALU.scala:139:23] wire [1:0] _count_T_162 = {1'h0, _count_T_52} + {1'h0, _count_T_53}; // @[ALU.scala:88:26, :139:23] wire [1:0] _count_T_163 = _count_T_162; // @[ALU.scala:139:23] wire [1:0] _count_T_164 = {1'h0, _count_T_54} + {1'h0, _count_T_55}; // @[ALU.scala:88:26, :139:23] wire [1:0] _count_T_165 = _count_T_164; // @[ALU.scala:139:23] wire [2:0] _count_T_166 = {1'h0, _count_T_163} + {1'h0, _count_T_165}; // @[ALU.scala:88:26, :139:23] wire [2:0] _count_T_167 = _count_T_166; // @[ALU.scala:139:23] wire [3:0] _count_T_168 = {1'h0, _count_T_161} + {1'h0, _count_T_167}; // @[ALU.scala:88:26, :139:23] wire [3:0] _count_T_169 = _count_T_168; // @[ALU.scala:139:23] wire [1:0] _count_T_170 = {1'h0, _count_T_56} + {1'h0, _count_T_57}; // @[ALU.scala:88:26, :139:23] wire [1:0] _count_T_171 = _count_T_170; // @[ALU.scala:139:23] wire [1:0] _count_T_172 = {1'h0, _count_T_58} + {1'h0, _count_T_59}; // @[ALU.scala:88:26, 
:139:23] wire [1:0] _count_T_173 = _count_T_172; // @[ALU.scala:139:23] wire [2:0] _count_T_174 = {1'h0, _count_T_171} + {1'h0, _count_T_173}; // @[ALU.scala:88:26, :139:23] wire [2:0] _count_T_175 = _count_T_174; // @[ALU.scala:139:23] wire [1:0] _count_T_176 = {1'h0, _count_T_60} + {1'h0, _count_T_61}; // @[ALU.scala:88:26, :139:23] wire [1:0] _count_T_177 = _count_T_176; // @[ALU.scala:139:23] wire [1:0] _count_T_178 = {1'h0, _count_T_62} + {1'h0, _count_T_63}; // @[ALU.scala:88:26, :139:23] wire [1:0] _count_T_179 = _count_T_178; // @[ALU.scala:139:23] wire [2:0] _count_T_180 = {1'h0, _count_T_177} + {1'h0, _count_T_179}; // @[ALU.scala:88:26, :139:23] wire [2:0] _count_T_181 = _count_T_180; // @[ALU.scala:139:23] wire [3:0] _count_T_182 = {1'h0, _count_T_175} + {1'h0, _count_T_181}; // @[ALU.scala:88:26, :139:23] wire [3:0] _count_T_183 = _count_T_182; // @[ALU.scala:139:23] wire [4:0] _count_T_184 = {1'h0, _count_T_169} + {1'h0, _count_T_183}; // @[ALU.scala:88:26, :139:23] wire [4:0] _count_T_185 = _count_T_184; // @[ALU.scala:139:23] wire [5:0] _count_T_186 = {1'h0, _count_T_155} + {1'h0, _count_T_185}; // @[ALU.scala:88:26, :139:23] wire [5:0] _count_T_187 = _count_T_186; // @[ALU.scala:139:23] wire [6:0] _count_T_188 = {1'h0, _count_T_125} + {1'h0, _count_T_187}; // @[ALU.scala:88:26, :139:23] wire [6:0] count = _count_T_188; // @[ALU.scala:139:23] wire [7:0] _in1_bytes_T; // @[ALU.scala:140:34] wire [7:0] _in1_bytes_T_1; // @[ALU.scala:140:34] wire [7:0] _rev8_WIRE_7 = in1_bytes_0; // @[ALU.scala:140:34, :142:21] wire [7:0] _in1_bytes_T_2; // @[ALU.scala:140:34] wire [7:0] _rev8_WIRE_6 = in1_bytes_1; // @[ALU.scala:140:34, :142:21] wire [7:0] _in1_bytes_T_3; // @[ALU.scala:140:34] wire [7:0] _rev8_WIRE_5 = in1_bytes_2; // @[ALU.scala:140:34, :142:21] wire [7:0] _in1_bytes_T_4; // @[ALU.scala:140:34] wire [7:0] _rev8_WIRE_4 = in1_bytes_3; // @[ALU.scala:140:34, :142:21] wire [7:0] _in1_bytes_T_5; // @[ALU.scala:140:34] wire [7:0] _rev8_WIRE_3 = in1_bytes_4; // @[ALU.scala:140:34, :142:21] wire [7:0] _in1_bytes_T_6; // @[ALU.scala:140:34] wire [7:0] _rev8_WIRE_2 = in1_bytes_5; // @[ALU.scala:140:34, :142:21] wire [7:0] _in1_bytes_T_7; // @[ALU.scala:140:34] wire [7:0] _rev8_WIRE_1 = in1_bytes_6; // @[ALU.scala:140:34, :142:21] wire [7:0] in1_bytes_7; // @[ALU.scala:140:34] wire [7:0] _rev8_WIRE_0 = in1_bytes_7; // @[ALU.scala:140:34, :142:21] assign _in1_bytes_T = _in1_bytes_WIRE[7:0]; // @[ALU.scala:140:34] assign in1_bytes_0 = _in1_bytes_T; // @[ALU.scala:140:34] assign _in1_bytes_T_1 = _in1_bytes_WIRE[15:8]; // @[ALU.scala:140:34] assign in1_bytes_1 = _in1_bytes_T_1; // @[ALU.scala:140:34] assign _in1_bytes_T_2 = _in1_bytes_WIRE[23:16]; // @[ALU.scala:140:34] assign in1_bytes_2 = _in1_bytes_T_2; // @[ALU.scala:140:34] assign _in1_bytes_T_3 = _in1_bytes_WIRE[31:24]; // @[ALU.scala:140:34] assign in1_bytes_3 = _in1_bytes_T_3; // @[ALU.scala:140:34] assign _in1_bytes_T_4 = _in1_bytes_WIRE[39:32]; // @[ALU.scala:140:34] assign in1_bytes_4 = _in1_bytes_T_4; // @[ALU.scala:140:34] assign _in1_bytes_T_5 = _in1_bytes_WIRE[47:40]; // @[ALU.scala:140:34] assign in1_bytes_5 = _in1_bytes_T_5; // @[ALU.scala:140:34] assign _in1_bytes_T_6 = _in1_bytes_WIRE[55:48]; // @[ALU.scala:140:34] assign in1_bytes_6 = _in1_bytes_T_6; // @[ALU.scala:140:34] assign _in1_bytes_T_7 = _in1_bytes_WIRE[63:56]; // @[ALU.scala:140:34] assign in1_bytes_7 = _in1_bytes_T_7; // @[ALU.scala:140:34] wire _orcb_T = |in1_bytes_0; // @[ALU.scala:140:34, :141:51] wire [7:0] _orcb_T_1 = {8{_orcb_T}}; // 
@[ALU.scala:141:{45,51}] wire [7:0] _orcb_WIRE_0 = _orcb_T_1; // @[ALU.scala:141:{21,45}] wire _orcb_T_2 = |in1_bytes_1; // @[ALU.scala:140:34, :141:51] wire [7:0] _orcb_T_3 = {8{_orcb_T_2}}; // @[ALU.scala:141:{45,51}] wire [7:0] _orcb_WIRE_1 = _orcb_T_3; // @[ALU.scala:141:{21,45}] wire _orcb_T_4 = |in1_bytes_2; // @[ALU.scala:140:34, :141:51] wire [7:0] _orcb_T_5 = {8{_orcb_T_4}}; // @[ALU.scala:141:{45,51}] wire [7:0] _orcb_WIRE_2 = _orcb_T_5; // @[ALU.scala:141:{21,45}] wire _orcb_T_6 = |in1_bytes_3; // @[ALU.scala:140:34, :141:51] wire [7:0] _orcb_T_7 = {8{_orcb_T_6}}; // @[ALU.scala:141:{45,51}] wire [7:0] _orcb_WIRE_3 = _orcb_T_7; // @[ALU.scala:141:{21,45}] wire _orcb_T_8 = |in1_bytes_4; // @[ALU.scala:140:34, :141:51] wire [7:0] _orcb_T_9 = {8{_orcb_T_8}}; // @[ALU.scala:141:{45,51}] wire [7:0] _orcb_WIRE_4 = _orcb_T_9; // @[ALU.scala:141:{21,45}] wire _orcb_T_10 = |in1_bytes_5; // @[ALU.scala:140:34, :141:51] wire [7:0] _orcb_T_11 = {8{_orcb_T_10}}; // @[ALU.scala:141:{45,51}] wire [7:0] _orcb_WIRE_5 = _orcb_T_11; // @[ALU.scala:141:{21,45}] wire _orcb_T_12 = |in1_bytes_6; // @[ALU.scala:140:34, :141:51] wire [7:0] _orcb_T_13 = {8{_orcb_T_12}}; // @[ALU.scala:141:{45,51}] wire [7:0] _orcb_WIRE_6 = _orcb_T_13; // @[ALU.scala:141:{21,45}] wire _orcb_T_14 = |in1_bytes_7; // @[ALU.scala:140:34, :141:51] wire [7:0] _orcb_T_15 = {8{_orcb_T_14}}; // @[ALU.scala:141:{45,51}] wire [7:0] _orcb_WIRE_7 = _orcb_T_15; // @[ALU.scala:141:{21,45}] wire [15:0] orcb_lo_lo = {_orcb_WIRE_1, _orcb_WIRE_0}; // @[ALU.scala:141:{21,62}] wire [15:0] orcb_lo_hi = {_orcb_WIRE_3, _orcb_WIRE_2}; // @[ALU.scala:141:{21,62}] wire [31:0] orcb_lo = {orcb_lo_hi, orcb_lo_lo}; // @[ALU.scala:141:62] wire [15:0] orcb_hi_lo = {_orcb_WIRE_5, _orcb_WIRE_4}; // @[ALU.scala:141:{21,62}] wire [15:0] orcb_hi_hi = {_orcb_WIRE_7, _orcb_WIRE_6}; // @[ALU.scala:141:{21,62}] wire [31:0] orcb_hi = {orcb_hi_hi, orcb_hi_lo}; // @[ALU.scala:141:62] wire [63:0] orcb = {orcb_hi, orcb_lo}; // @[ALU.scala:141:62] wire [15:0] rev8_lo_lo = {_rev8_WIRE_1, _rev8_WIRE_0}; // @[ALU.scala:142:{21,41}] wire [15:0] rev8_lo_hi = {_rev8_WIRE_3, _rev8_WIRE_2}; // @[ALU.scala:142:{21,41}] wire [31:0] rev8_lo = {rev8_lo_hi, rev8_lo_lo}; // @[ALU.scala:142:41] wire [15:0] rev8_hi_lo = {_rev8_WIRE_5, _rev8_WIRE_4}; // @[ALU.scala:142:{21,41}] wire [15:0] rev8_hi_hi = {_rev8_WIRE_7, _rev8_WIRE_6}; // @[ALU.scala:142:{21,41}] wire [31:0] rev8_hi = {rev8_hi_hi, rev8_hi_lo}; // @[ALU.scala:142:41] wire [63:0] rev8 = {rev8_hi, rev8_lo}; // @[ALU.scala:142:41] wire [11:0] _unary_T = io_in2_0[11:0]; // @[ALU.scala:83:7, :143:31] wire [15:0] _unary_T_1 = io_in1_0[15:0]; // @[ALU.scala:83:7, :146:22] wire [15:0] _unary_T_8 = io_in1_0[15:0]; // @[ALU.scala:83:7, :146:22, :148:51] wire _unary_T_2 = io_in1_0[7]; // @[ALU.scala:83:7, :147:35] wire [55:0] _unary_T_3 = {56{_unary_T_2}}; // @[ALU.scala:147:{20,35}] wire [7:0] _unary_T_4 = io_in1_0[7:0]; // @[ALU.scala:83:7, :147:49] wire [63:0] _unary_T_5 = {_unary_T_3, _unary_T_4}; // @[ALU.scala:147:{20,40,49}] wire _unary_T_6 = io_in1_0[15]; // @[ALU.scala:83:7, :148:36] wire [47:0] _unary_T_7 = {48{_unary_T_6}}; // @[ALU.scala:148:{20,36}] wire [63:0] _unary_T_9 = {_unary_T_7, _unary_T_8}; // @[ALU.scala:148:{20,42,51}] wire _unary_T_10 = _unary_T == 12'h287; // @[ALU.scala:143:{31,45}] wire [63:0] _unary_T_11 = _unary_T_10 ? orcb : {57'h0, count}; // @[ALU.scala:139:23, :141:62, :143:45] wire _unary_T_12 = _unary_T == 12'h6B8; // @[ALU.scala:143:{31,45}] wire [63:0] _unary_T_13 = _unary_T_12 ? 
rev8 : _unary_T_11; // @[ALU.scala:142:41, :143:45] wire _unary_T_14 = _unary_T == 12'h80; // @[ALU.scala:143:{31,45}] wire [63:0] _unary_T_15 = _unary_T_14 ? {48'h0, _unary_T_1} : _unary_T_13; // @[ALU.scala:143:45, :146:22] wire _unary_T_16 = _unary_T == 12'h604; // @[ALU.scala:143:{31,45}] wire [63:0] _unary_T_17 = _unary_T_16 ? _unary_T_5 : _unary_T_15; // @[ALU.scala:143:45, :147:40] wire _unary_T_18 = _unary_T == 12'h605; // @[ALU.scala:143:{31,45}] wire [63:0] unary = _unary_T_18 ? _unary_T_9 : _unary_T_17; // @[ALU.scala:143:45, :148:42] wire [63:0] maxmin_out = io_cmp_out ? io_in2_0 : io_in1_0; // @[ALU.scala:83:7, :152:23] wire _rot_shamt_T = ~io_dw_0; // @[ALU.scala:83:7, :130:32, :155:29] wire [6:0] _rot_shamt_T_1 = _rot_shamt_T ? 7'h20 : 7'h40; // @[ALU.scala:155:{22,29}] wire [7:0] _rot_shamt_T_2 = {1'h0, _rot_shamt_T_1} - {2'h0, shamt}; // @[package.scala:16:47] wire [6:0] rot_shamt = _rot_shamt_T_2[6:0]; // @[ALU.scala:155:54] wire [63:0] _rotin_T_4 = {32'h0, _rotin_T_3}; // @[ALU.scala:156:44] wire [63:0] _rotin_T_6 = {_rotin_T_5, 32'h0}; // @[ALU.scala:156:44] wire [63:0] _rotin_T_8 = _rotin_T_6 & 64'hFFFFFFFF00000000; // @[ALU.scala:156:44] wire [63:0] _rotin_T_9 = _rotin_T_4 | _rotin_T_8; // @[ALU.scala:156:44] wire [47:0] _rotin_T_13 = _rotin_T_9[63:16]; // @[ALU.scala:156:44] wire [63:0] _rotin_T_14 = {16'h0, _rotin_T_13 & 48'hFFFF0000FFFF}; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [47:0] _rotin_T_15 = _rotin_T_9[47:0]; // @[ALU.scala:156:44] wire [63:0] _rotin_T_16 = {_rotin_T_15, 16'h0}; // @[ALU.scala:106:46, :156:44] wire [63:0] _rotin_T_18 = _rotin_T_16 & 64'hFFFF0000FFFF0000; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotin_T_19 = _rotin_T_14 | _rotin_T_18; // @[ALU.scala:156:44] wire [55:0] _rotin_T_23 = _rotin_T_19[63:8]; // @[ALU.scala:156:44] wire [63:0] _rotin_T_24 = {8'h0, _rotin_T_23 & 56'hFF00FF00FF00FF}; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [55:0] _rotin_T_25 = _rotin_T_19[55:0]; // @[ALU.scala:156:44] wire [63:0] _rotin_T_26 = {_rotin_T_25, 8'h0}; // @[ALU.scala:156:44] wire [63:0] _rotin_T_28 = _rotin_T_26 & 64'hFF00FF00FF00FF00; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotin_T_29 = _rotin_T_24 | _rotin_T_28; // @[ALU.scala:156:44] wire [59:0] _rotin_T_33 = _rotin_T_29[63:4]; // @[ALU.scala:156:44] wire [63:0] _rotin_T_34 = {4'h0, _rotin_T_33 & 60'hF0F0F0F0F0F0F0F}; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [59:0] _rotin_T_35 = _rotin_T_29[59:0]; // @[ALU.scala:156:44] wire [63:0] _rotin_T_36 = {_rotin_T_35, 4'h0}; // @[ALU.scala:106:46, :156:44] wire [63:0] _rotin_T_38 = _rotin_T_36 & 64'hF0F0F0F0F0F0F0F0; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotin_T_39 = _rotin_T_34 | _rotin_T_38; // @[ALU.scala:156:44] wire [61:0] _rotin_T_43 = _rotin_T_39[63:2]; // @[ALU.scala:156:44] wire [63:0] _rotin_T_44 = {2'h0, _rotin_T_43 & 62'h3333333333333333}; // @[package.scala:16:47] wire [61:0] _rotin_T_45 = _rotin_T_39[61:0]; // @[ALU.scala:156:44] wire [63:0] _rotin_T_46 = {_rotin_T_45, 2'h0}; // @[package.scala:16:47] wire [63:0] _rotin_T_48 = _rotin_T_46 & 64'hCCCCCCCCCCCCCCCC; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotin_T_49 = _rotin_T_44 | _rotin_T_48; // @[ALU.scala:156:44] wire [62:0] _rotin_T_53 = _rotin_T_49[63:1]; // @[ALU.scala:156:44] wire [63:0] _rotin_T_54 = {1'h0, _rotin_T_53 & 63'h5555555555555555}; // @[ALU.scala:88:26, :106:46, :108:24, :132:19, 
:156:44, :158:25] wire [62:0] _rotin_T_55 = _rotin_T_49[62:0]; // @[ALU.scala:156:44] wire [63:0] _rotin_T_56 = {_rotin_T_55, 1'h0}; // @[ALU.scala:88:26, :156:44] wire [63:0] _rotin_T_58 = _rotin_T_56 & 64'hAAAAAAAAAAAAAAAA; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotin_T_59 = _rotin_T_54 | _rotin_T_58; // @[ALU.scala:156:44] wire [63:0] rotin = _rotin_T ? shin_r : _rotin_T_59; // @[ALU.scala:104:18, :156:{18,24,44}] wire [63:0] _rotout_r_T = rotin >> rot_shamt; // @[ALU.scala:155:54, :156:18, :157:25] wire [63:0] rotout_r = _rotout_r_T; // @[ALU.scala:157:{25,38}] wire [31:0] _rotout_l_T_2 = rotout_r[63:32]; // @[ALU.scala:157:38, :158:25] wire [63:0] _rotout_l_T_3 = {32'h0, _rotout_l_T_2}; // @[ALU.scala:158:25] wire [31:0] _rotout_l_T_4 = rotout_r[31:0]; // @[ALU.scala:157:38, :158:25] wire [63:0] _rotout_l_T_5 = {_rotout_l_T_4, 32'h0}; // @[ALU.scala:158:25] wire [63:0] _rotout_l_T_7 = _rotout_l_T_5 & 64'hFFFFFFFF00000000; // @[ALU.scala:158:25] wire [63:0] _rotout_l_T_8 = _rotout_l_T_3 | _rotout_l_T_7; // @[ALU.scala:158:25] wire [47:0] _rotout_l_T_12 = _rotout_l_T_8[63:16]; // @[ALU.scala:158:25] wire [63:0] _rotout_l_T_13 = {16'h0, _rotout_l_T_12 & 48'hFFFF0000FFFF}; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [47:0] _rotout_l_T_14 = _rotout_l_T_8[47:0]; // @[ALU.scala:158:25] wire [63:0] _rotout_l_T_15 = {_rotout_l_T_14, 16'h0}; // @[ALU.scala:106:46, :158:25] wire [63:0] _rotout_l_T_17 = _rotout_l_T_15 & 64'hFFFF0000FFFF0000; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotout_l_T_18 = _rotout_l_T_13 | _rotout_l_T_17; // @[ALU.scala:158:25] wire [55:0] _rotout_l_T_22 = _rotout_l_T_18[63:8]; // @[ALU.scala:158:25] wire [63:0] _rotout_l_T_23 = {8'h0, _rotout_l_T_22 & 56'hFF00FF00FF00FF}; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [55:0] _rotout_l_T_24 = _rotout_l_T_18[55:0]; // @[ALU.scala:158:25] wire [63:0] _rotout_l_T_25 = {_rotout_l_T_24, 8'h0}; // @[ALU.scala:158:25] wire [63:0] _rotout_l_T_27 = _rotout_l_T_25 & 64'hFF00FF00FF00FF00; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotout_l_T_28 = _rotout_l_T_23 | _rotout_l_T_27; // @[ALU.scala:158:25] wire [59:0] _rotout_l_T_32 = _rotout_l_T_28[63:4]; // @[ALU.scala:158:25] wire [63:0] _rotout_l_T_33 = {4'h0, _rotout_l_T_32 & 60'hF0F0F0F0F0F0F0F}; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [59:0] _rotout_l_T_34 = _rotout_l_T_28[59:0]; // @[ALU.scala:158:25] wire [63:0] _rotout_l_T_35 = {_rotout_l_T_34, 4'h0}; // @[ALU.scala:106:46, :158:25] wire [63:0] _rotout_l_T_37 = _rotout_l_T_35 & 64'hF0F0F0F0F0F0F0F0; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotout_l_T_38 = _rotout_l_T_33 | _rotout_l_T_37; // @[ALU.scala:158:25] wire [61:0] _rotout_l_T_42 = _rotout_l_T_38[63:2]; // @[ALU.scala:158:25] wire [63:0] _rotout_l_T_43 = {2'h0, _rotout_l_T_42 & 62'h3333333333333333}; // @[package.scala:16:47] wire [61:0] _rotout_l_T_44 = _rotout_l_T_38[61:0]; // @[ALU.scala:158:25] wire [63:0] _rotout_l_T_45 = {_rotout_l_T_44, 2'h0}; // @[package.scala:16:47] wire [63:0] _rotout_l_T_47 = _rotout_l_T_45 & 64'hCCCCCCCCCCCCCCCC; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] _rotout_l_T_48 = _rotout_l_T_43 | _rotout_l_T_47; // @[ALU.scala:158:25] wire [62:0] _rotout_l_T_52 = _rotout_l_T_48[63:1]; // @[ALU.scala:158:25] wire [63:0] _rotout_l_T_53 = {1'h0, _rotout_l_T_52 & 63'h5555555555555555}; // @[ALU.scala:88:26, :106:46, :108:24, :132:19, :156:44, 
:158:25] wire [62:0] _rotout_l_T_54 = _rotout_l_T_48[62:0]; // @[ALU.scala:158:25] wire [63:0] _rotout_l_T_55 = {_rotout_l_T_54, 1'h0}; // @[ALU.scala:88:26, :158:25] wire [63:0] _rotout_l_T_57 = _rotout_l_T_55 & 64'hAAAAAAAAAAAAAAAA; // @[ALU.scala:106:46, :108:24, :132:19, :156:44, :158:25] wire [63:0] rotout_l = _rotout_l_T_53 | _rotout_l_T_57; // @[ALU.scala:158:25] wire [63:0] _rotout_T_1 = _rotout_T ? rotout_r : rotout_l; // @[ALU.scala:157:38, :158:25, :159:{19,25}] wire [63:0] _rotout_T_3 = _rotout_T_2 ? shout_l : shout_r; // @[ALU.scala:107:73, :108:24, :159:{55,61}] wire [63:0] rotout = _rotout_T_1 | _rotout_T_3; // @[ALU.scala:159:{19,50,55}] wire _out_T = io_fn_0 == 5'h0; // @[ALU.scala:83:7, :161:47] wire [63:0] _out_T_1 = _out_T ? io_adder_out : shift_logic; // @[ALU.scala:83:7, :123:52, :161:47] wire _out_T_2 = io_fn_0 == 5'hA; // @[ALU.scala:83:7, :161:47] wire [63:0] _out_T_3 = _out_T_2 ? io_adder_out : _out_T_1; // @[ALU.scala:83:7, :161:47] wire _out_T_4 = io_fn_0 == 5'h10; // @[ALU.scala:83:7, :161:47] wire [63:0] _out_T_5 = _out_T_4 ? unary : _out_T_3; // @[ALU.scala:143:45, :161:47] wire _out_T_6 = io_fn_0 == 5'h1C; // @[ALU.scala:83:7, :161:47] wire [63:0] _out_T_7 = _out_T_6 ? maxmin_out : _out_T_5; // @[ALU.scala:152:23, :161:47] wire _out_T_8 = io_fn_0 == 5'h1D; // @[ALU.scala:83:7, :161:47] wire [63:0] _out_T_9 = _out_T_8 ? maxmin_out : _out_T_7; // @[ALU.scala:152:23, :161:47] wire _out_T_10 = io_fn_0 == 5'h1E; // @[ALU.scala:83:7, :161:47] wire [63:0] _out_T_11 = _out_T_10 ? maxmin_out : _out_T_9; // @[ALU.scala:152:23, :161:47] wire _out_T_12 = &io_fn_0; // @[ALU.scala:83:7, :161:47] wire [63:0] _out_T_13 = _out_T_12 ? maxmin_out : _out_T_11; // @[ALU.scala:152:23, :161:47] wire _out_T_14 = io_fn_0 == 5'h11; // @[ALU.scala:83:7, :161:47] wire [63:0] _out_T_15 = _out_T_14 ? rotout : _out_T_13; // @[ALU.scala:159:50, :161:47] wire [63:0] out = _out_T_16 ? rotout : _out_T_15; // @[ALU.scala:159:50, :161:47] wire _io_out_T = out[31]; // @[ALU.scala:161:47, :178:56] wire [31:0] _io_out_T_1 = {32{_io_out_T}}; // @[ALU.scala:178:{48,56}] wire [31:0] _io_out_T_2 = out[31:0]; // @[ALU.scala:161:47, :178:66] wire [63:0] _io_out_T_3 = {_io_out_T_1, _io_out_T_2}; // @[ALU.scala:178:{43,48,66}] assign io_out_0 = io_dw_0 ? out : _io_out_T_3; // @[ALU.scala:83:7, :161:47, :175:10, :178:{28,37,43}] assign io_out = io_out_0; // @[ALU.scala:83:7] endmodule
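For orientation, the ALU module that ends above spells out its bit-manipulation datapaths as flat wire trees. The sketch below is illustrative only -- ALU.scala itself is not reproduced here, so the class and signal names are made up -- but it shows the byte-granular idiom that the generated wires (in1_bytes_*, _orcb_*, _rev8_*, _count_T_*) correspond to: orc.b OR-reduces each byte and replicates the result across that byte, rev8 reverses the byte order, and the population count is a balanced adder tree.

import chisel3._
import chisel3.util._

// Illustrative sketch, not the rocket-chip ALU source.
class ByteOpsSketch extends Module {
  val io = IO(new Bundle {
    val in1  = Input(UInt(64.W))
    val orcb = Output(UInt(64.W))
    val rev8 = Output(UInt(64.W))
    val popc = Output(UInt(7.W))
  })
  // Split the operand into bytes, low byte first, mirroring in1_bytes_0 .. in1_bytes_7 above.
  val bytes = io.in1.asBools.grouped(8).map(g => Cat(g.reverse)).toSeq
  io.orcb := Cat(bytes.map(b => Fill(8, b.orR)).reverse) // byte i -> 8 copies of |byte_i
  io.rev8 := Cat(bytes)                                  // Cat puts element 0 at the MSBs, so byte order flips
  io.popc := PopCount(io.in1)                            // the adder tree written out as _count_T_* above
}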
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File AsyncQueue.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ case class AsyncQueueParams( depth: Int = 8, sync: Int = 3, safe: Boolean = true, // If safe is true, then effort is made to resynchronize the crossing indices when either side is reset. // This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty. narrow: Boolean = false) // If narrow is true then the read mux is moved to the source side of the crossing. // This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing, // at the expense of a combinational path from the sink to the source and back to the sink. { require (depth > 0 && isPow2(depth)) require (sync >= 2) val bits = log2Ceil(depth) val wires = if (narrow) 1 else depth } object AsyncQueueParams { // When there is only one entry, we don't need narrow. 
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false) } class AsyncBundleSafety extends Bundle { val ridx_valid = Input (Bool()) val widx_valid = Output(Bool()) val source_reset_n = Output(Bool()) val sink_reset_n = Input (Bool()) } class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle { // Data-path synchronization val mem = Output(Vec(params.wires, gen)) val ridx = Input (UInt((params.bits+1).W)) val widx = Output(UInt((params.bits+1).W)) val index = params.narrow.option(Input(UInt(params.bits.W))) // Signals used to self-stabilize a safe AsyncQueue val safe = params.safe.option(new AsyncBundleSafety) } object GrayCounter { def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = { val incremented = Wire(UInt(bits.W)) val binary = RegNext(next=incremented, init=0.U).suggestName(name) incremented := Mux(clear, 0.U, binary + increment.asUInt) incremented ^ (incremented >> 1) } } class AsyncValidSync(sync: Int, desc: String) extends RawModule { val io = IO(new Bundle { val in = Input(Bool()) val out = Output(Bool()) }) val clock = IO(Input(Clock())) val reset = IO(Input(AsyncReset())) withClockAndReset(clock, reset){ io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc)) } } class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSource_${gen.typeName}" val io = IO(new Bundle { // These come from the source domain val enq = Flipped(Decoupled(gen)) // These cross to the sink clock domain val async = new AsyncBundle(gen, params) }) val bits = params.bits val sink_ready = WireInit(true.B) val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all. 
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin")) val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray")) val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U) val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1)) when (io.enq.fire) { mem(index) := io.enq.bits } val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg")) io.enq.ready := ready_reg && sink_ready val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray")) io.async.widx := widx_reg io.async.index match { case Some(index) => io.async.mem(0) := mem(index) case None => io.async.mem := mem } io.async.safe.foreach { sio => val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0")) val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1")) val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend")) val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid")) source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_valid .reset := reset.asAsyncReset source_valid_0.clock := clock source_valid_1.clock := clock sink_extend .clock := clock sink_valid .clock := clock source_valid_0.io.in := true.B source_valid_1.io.in := source_valid_0.io.out sio.widx_valid := source_valid_1.io.out sink_extend.io.in := sio.ridx_valid sink_valid.io.in := sink_extend.io.out sink_ready := sink_valid.io.out sio.source_reset_n := !reset.asBool // Assert that if there is stuff in the queue, then reset cannot happen // Impossible to write because dequeue can occur on the receiving side, // then reset allowed to happen, but write side cannot know that dequeue // occurred. 
// TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected") // assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty") } } class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSink_${gen.typeName}" val io = IO(new Bundle { // These come from the sink domain val deq = Decoupled(gen) // These cross to the source clock domain val async = Flipped(new AsyncBundle(gen, params)) }) val bits = params.bits val source_ready = WireInit(true.B) val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin")) val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray")) val valid = source_ready && ridx =/= widx // The mux is safe because timing analysis ensures ridx has reached the register // On an ASIC, changes to the unread location cannot affect the selected value // On an FPGA, only one input changes at a time => mem updates don't cause glitches // The register only latches when the selected valued is not being written val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1)) io.async.index.foreach { _ := index } // This register does not NEED to be reset, as its contents will not // be considered unless the asynchronously reset deq valid register is set. // It is possible that bits latches when the source domain is reset / has power cut // This is safe, because isolation gates brought mem low before the zeroed widx reached us val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index) io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg")) val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg")) io.deq.valid := valid_reg && source_ready val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray")) io.async.ridx := ridx_reg io.async.safe.foreach { sio => val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0")) val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1")) val source_extend = Module(new AsyncValidSync(params.sync, "source_extend")) val source_valid = Module(new AsyncValidSync(params.sync, "source_valid")) sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_valid .reset := reset.asAsyncReset sink_valid_0 .clock := clock sink_valid_1 .clock := clock source_extend.clock := clock source_valid .clock := clock sink_valid_0.io.in := true.B sink_valid_1.io.in := sink_valid_0.io.out sio.ridx_valid := sink_valid_1.io.out source_extend.io.in := sio.widx_valid source_valid.io.in := source_extend.io.out source_ready := source_valid.io.out sio.sink_reset_n := !reset.asBool // TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // // val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool // val reset_and_extend_prev = RegNext(reset_and_extend, true.B) // val reset_rise = !reset_and_extend_prev && reset_and_extend // val prev_idx_match = 
AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0) // assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty") } } object FromAsyncBundle { // Sometimes it makes sense for the sink to have different sync than the source def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync) def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = { val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync))) sink.io.async <> x sink.io.deq } } object ToAsyncBundle { def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = { val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params)) source.io.enq <> x source.io.async } } class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] { val io = IO(new CrossingIO(gen)) val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) } val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) } source.io.enq <> io.enq io.deq <> sink.io.deq sink.io.async <> source.io.async }
module AsyncValidSync_84( // @[AsyncQueue.scala:58:7]
  output io_out, // @[AsyncQueue.scala:59:14]
  input  clock, // @[AsyncQueue.scala:63:17]
  input  reset // @[AsyncQueue.scala:64:17]
);

  wire io_in = 1'h1; // @[ShiftReg.scala:45:23]
  wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
  wire io_out_0; // @[AsyncQueue.scala:58:7]
  assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
  AsyncResetSynchronizerShiftReg_w1_d3_i0_97 io_out_sink_valid_0 ( // @[ShiftReg.scala:45:23]
    .clock (clock),
    .reset (reset),
    .io_q  (_io_out_WIRE)
  ); // @[ShiftReg.scala:45:23]
  assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule
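The pointer logic this synchronizer serves (see GrayCounter and the widx/ridx registers in AsyncQueue.scala above) converts a binary pointer to Gray code before it crosses the clock boundary, so only one bit toggles per increment and a flop-chain synchronizer such as the 3-deep shift register instantiated here can never capture a transiently inconsistent multi-bit value. A minimal sketch of that transform, assuming nothing beyond plain chisel3 (names are illustrative, not the rocket-chip implementation):

import chisel3._

// Minimal Gray-pointer sketch: increment a binary register and publish the
// Gray-coded value (binary ^ (binary >> 1)), the same transform GrayCounter
// uses above. One increment flips exactly one output bit.
class GraySketch(bits: Int) extends Module {
  val io = IO(new Bundle {
    val inc  = Input(Bool())
    val gray = Output(UInt(bits.W))
  })
  val binary = RegInit(0.U(bits.W))
  binary := binary + io.inc.asUInt
  io.gray := binary ^ (binary >> 1)
}

The AsyncValidSync_84 wrapper above applies the same idea to a single valid bit: it simply passes a constant-high input through an asynchronously reset 3-stage flop chain in the destination clock domain.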
Generate the Verilog code corresponding to the following Chisel files. File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. 
*/ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class 
OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Nodes.scala: // See LICENSE.SiFive for license details. 
package freechips.rocketchip.tilelink import chisel3._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.nodes._ import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection} case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args)) object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle] { def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo) def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo) def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle) def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle) def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString) override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = { val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge))) monitor.io.in := bundle } override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters = pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }) override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters = pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }) } trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut] case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode case class TLAdapterNode( clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s }, managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLJunctionNode( clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters], managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])( implicit valName: ValName) extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode object TLNameNode { def apply(name: ValName) = TLIdentityNode()(name) def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLIdentityNode = apply(Some(name)) } case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)() object TLTempNode { def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp")) } case class TLNexusNode( clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters, managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)( implicit valName: ValName) extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode abstract class TLCustomNode(implicit valName: ValName) extends CustomNode(TLImp) with TLFormatNode // Asynchronous crossings trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters] object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, 
TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle] { def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle) def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString) override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLAsyncAdapterNode( clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s }, managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode object TLAsyncNameNode { def apply(name: ValName) = TLAsyncIdentityNode()(name) def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLAsyncIdentityNode = apply(Some(name)) } case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLAsyncImp)( dFn = { p => TLAsyncClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName) extends MixedAdapterNode(TLAsyncImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) }, uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut] // Rationally related crossings trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters] object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle] { def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle) def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */) override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case 
class TLRationalAdapterNode( clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s }, managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode object TLRationalNameNode { def apply(name: ValName) = TLRationalIdentityNode()(name) def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLRationalIdentityNode = apply(Some(name)) } case class TLRationalSourceNode()(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLRationalImp)( dFn = { p => TLRationalClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName) extends MixedAdapterNode(TLRationalImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut] // Credited version of TileLink channels trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters] object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle] { def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle) def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString) override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLCreditedAdapterNode( clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s }, managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode object TLCreditedNameNode { def apply(name: ValName) = TLCreditedIdentityNode()(name) def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLCreditedIdentityNode = apply(Some(name)) } case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLCreditedImp)( dFn = { p => TLCreditedClientPortParameters(delay, p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] 
// discard cycles from other clock domain case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLCreditedImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut] File Bundles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import freechips.rocketchip.util._ import scala.collection.immutable.ListMap import chisel3.util.Decoupled import chisel3.util.DecoupledIO import chisel3.reflect.DataMirror abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle // common combos in lazy policy: // Put + Acquire // Release + AccessAck object TLMessages { // A B C D E def PutFullData = 0.U // . . => AccessAck def PutPartialData = 1.U // . . => AccessAck def ArithmeticData = 2.U // . . => AccessAckData def LogicalData = 3.U // . . => AccessAckData def Get = 4.U // . . => AccessAckData def Hint = 5.U // . . => HintAck def AcquireBlock = 6.U // . => Grant[Data] def AcquirePerm = 7.U // . => Grant[Data] def Probe = 6.U // . => ProbeAck[Data] def AccessAck = 0.U // . . def AccessAckData = 1.U // . . def HintAck = 2.U // . . def ProbeAck = 4.U // . def ProbeAckData = 5.U // . def Release = 6.U // . => ReleaseAck def ReleaseData = 7.U // . => ReleaseAck def Grant = 4.U // . => GrantAck def GrantData = 5.U // . => GrantAck def ReleaseAck = 6.U // . def GrantAck = 0.U // . def isA(x: UInt) = x <= AcquirePerm def isB(x: UInt) = x <= Probe def isC(x: UInt) = x <= ReleaseData def isD(x: UInt) = x <= ReleaseAck def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant) def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck) def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("AcquireBlock",TLPermissions.PermMsgGrow), ("AcquirePerm",TLPermissions.PermMsgGrow)) def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("Probe",TLPermissions.PermMsgCap)) def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("ProbeAck",TLPermissions.PermMsgReport), ("ProbeAckData",TLPermissions.PermMsgReport), ("Release",TLPermissions.PermMsgReport), ("ReleaseData",TLPermissions.PermMsgReport)) def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("Grant",TLPermissions.PermMsgCap), ("GrantData",TLPermissions.PermMsgCap), ("ReleaseAck",TLPermissions.PermMsgReserved)) } /** * The three primary TileLink permissions are: * (T)runk: the agent is (or is on inwards path to) the global point of serialization. * (B)ranch: the agent is on an outwards path to * (N)one: * These permissions are permuted by transfer operations in various ways. 
* Operations can cap permissions, request for them to be grown or shrunk, * or for a report on their current status. */ object TLPermissions { val aWidth = 2 val bdWidth = 2 val cWidth = 3 // Cap types (Grant = new permissions, Probe = permisions <= target) def toT = 0.U(bdWidth.W) def toB = 1.U(bdWidth.W) def toN = 2.U(bdWidth.W) def isCap(x: UInt) = x <= toN // Grow types (Acquire = permissions >= target) def NtoB = 0.U(aWidth.W) def NtoT = 1.U(aWidth.W) def BtoT = 2.U(aWidth.W) def isGrow(x: UInt) = x <= BtoT // Shrink types (ProbeAck, Release) def TtoB = 0.U(cWidth.W) def TtoN = 1.U(cWidth.W) def BtoN = 2.U(cWidth.W) def isShrink(x: UInt) = x <= BtoN // Report types (ProbeAck, Release) def TtoT = 3.U(cWidth.W) def BtoB = 4.U(cWidth.W) def NtoN = 5.U(cWidth.W) def isReport(x: UInt) = x <= NtoN def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT") def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN") def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TotT", "Report BtoB", "Report NtoN") def PermMsgReserved:Seq[String] = Seq("Reserved") } object TLAtomics { val width = 3 // Arithmetic types def MIN = 0.U(width.W) def MAX = 1.U(width.W) def MINU = 2.U(width.W) def MAXU = 3.U(width.W) def ADD = 4.U(width.W) def isArithmetic(x: UInt) = x <= ADD // Logical types def XOR = 0.U(width.W) def OR = 1.U(width.W) def AND = 2.U(width.W) def SWAP = 3.U(width.W) def isLogical(x: UInt) = x <= SWAP def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD") def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP") } object TLHints { val width = 1 def PREFETCH_READ = 0.U(width.W) def PREFETCH_WRITE = 1.U(width.W) def isHints(x: UInt) = x <= PREFETCH_WRITE def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite") } sealed trait TLChannel extends TLBundleBase { val channelName: String } sealed trait TLDataChannel extends TLChannel sealed trait TLAddrChannel extends TLDataChannel final class TLBundleA(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleA_${params.shortName}" val channelName = "'A' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleB(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleB_${params.shortName}" val channelName = "'B' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val address = UInt(params.addressBits.W) // from // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleC(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleC_${params.shortName}" val channelName = "'C' channel" // fixed fields during multibeat: val opcode 
= UInt(3.W) val param = UInt(TLPermissions.cWidth.W) // shrink or report perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleD(params: TLBundleParameters) extends TLBundleBase(params) with TLDataChannel { override def typeName = s"TLBundleD_${params.shortName}" val channelName = "'D' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val sink = UInt(params.sinkBits.W) // from val denied = Bool() // implies corrupt iff *Data val user = BundleMap(params.responseFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleE(params: TLBundleParameters) extends TLBundleBase(params) with TLChannel { override def typeName = s"TLBundleE_${params.shortName}" val channelName = "'E' channel" val sink = UInt(params.sinkBits.W) // to } class TLBundle(val params: TLBundleParameters) extends Record { // Emulate a Bundle with elements abcde or ad depending on params.hasBCE private val optA = Some (Decoupled(new TLBundleA(params))) private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params)))) private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params))) private val optD = Some (Flipped(Decoupled(new TLBundleD(params)))) private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params))) def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params))))) def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params))))) def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params))))) def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params))))) def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params))))) val elements = if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a) else ListMap("d" -> d, "a" -> a) def tieoff(): Unit = { DataMirror.specifiedDirectionOf(a.ready) match { case SpecifiedDirection.Input => a.ready := false.B c.ready := false.B e.ready := false.B b.valid := false.B d.valid := false.B case SpecifiedDirection.Output => a.valid := false.B c.valid := false.B e.valid := false.B b.ready := false.B d.ready := false.B case _ => } } } object TLBundle { def apply(params: TLBundleParameters) = new TLBundle(params) } class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params) { val a = new AsyncBundle(new TLBundleA(params.base), params.async) val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async)) val c = new AsyncBundle(new TLBundleC(params.base), params.async) val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async)) val e = new AsyncBundle(new TLBundleE(params.base), params.async) } class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = RationalIO(new 
TLBundleA(params)) val b = Flipped(RationalIO(new TLBundleB(params))) val c = RationalIO(new TLBundleC(params)) val d = Flipped(RationalIO(new TLBundleD(params))) val e = RationalIO(new TLBundleE(params)) } class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = CreditedIO(new TLBundleA(params)) val b = Flipped(CreditedIO(new TLBundleB(params))) val c = CreditedIO(new TLBundleC(params)) val d = Flipped(CreditedIO(new TLBundleD(params))) val e = CreditedIO(new TLBundleE(params)) } File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. 
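    // (Added note, not in the original file:) `children` is accumulated by prepending each child
    // as it is constructed, so `wrapper.children.reverse` visits the children in construction order;
    // each child is either cloned via CloneModuleAsRecord or instantiated with Module(...) below,
    // and its unconnected Dangles are collected here.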
val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. 
*/ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate 
these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // An potentially empty inclusive range of 2-powers [min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max, s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask are the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f decribes a device managing 0x1000-0x100f, 0x1100-0x110f, ... 
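// (illustrative examples, not in the original file)
// e.g: AddressSet(0x200, 0xff).contains(0x2a4) is true, and it overlaps AddressSet(0x280, 0x7f)
// e.g: AddressSet(0x200, 0xff).widen(0x1000) additionally matches 0x1200-0x12ff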
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File MixedNode.scala: package org.chipsalliance.diplomacy.nodes import chisel3.{Data, DontCare, Wire} import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.{Field, Parameters} import org.chipsalliance.diplomacy.ValName import org.chipsalliance.diplomacy.sourceLine /** One side metadata of a [[Dangle]]. * * Describes one side of an edge going into or out of a [[BaseNode]]. * * @param serial * the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to. * @param index * the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to. */ case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] { import scala.math.Ordered.orderingToOrdered def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that)) } /** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]] * connects. 
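 * (In other words, a Dangle is one as-yet-unterminated end of a diplomatic connection: it is later either wired to its counterpart in a sibling module or punched out as an [[AutoBundle]] IO port.)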
* * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] , * [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]]. * * @param source * the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within * that [[BaseNode]]. * @param sink * sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that * [[BaseNode]]. * @param flipped * flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to * `danglesIn`. * @param dataOpt * actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module */ case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) { def data = dataOpt.get } /** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often * derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually * implement the protocol. */ case class Edges[EI, EO](in: Seq[EI], out: Seq[EO]) /** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */ case object MonitorsEnabled extends Field[Boolean](true) /** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented. * * For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but * [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink * nodes, flipping the rendering of one node's edge will usual produce a more concise visual layout for the * [[LazyModule]]. */ case object RenderFlipped extends Field[Boolean](false) /** The sealed node class in the package, all node are derived from it. * * @param inner * Sink interface implementation. * @param outer * Source interface implementation. * @param valName * val name of this node. * @tparam DI * Downward-flowing parameters received on the inner side of the node. It is usually a brunch of parameters * describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected * [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port * parameters. * @tparam UI * Upward-flowing parameters generated by the inner side of the node. It is usually a brunch of parameters describing * the protocol parameters of a sink. For an [[InwardNode]], it is determined itself. * @tparam EI * Edge Parameters describing a connection on the inner side of the node. It is usually a brunch of transfers * specified for a sink according to protocol. * @tparam BI * Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface. * It should extends from [[chisel3.Data]], which represents the real hardware. * @tparam DO * Downward-flowing parameters generated on the outer side of the node. It is usually a brunch of parameters * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined itself. * @tparam UO * Upward-flowing parameters received by the outer side of the node. It is usually a brunch of parameters describing * the protocol parameters from a sink. 
For an [[OutwardNode]], it is determined by the connected [[InwardNode]]. * Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters. * @tparam EO * Edge Parameters describing a connection on the outer side of the node. It is usually a brunch of transfers * specified for a source according to protocol. * @tparam BO * Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source * interface. It should extends from [[chisel3.Data]], which represents the real hardware. * * @note * Call Graph of [[MixedNode]] * - line `─`: source is process by a function and generate pass to others * - Arrow `→`: target of arrow is generated by source * * {{{ * (from the other node) * ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐ * ↓ │ * (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │ * [[InwardNode.accPI]] │ │ │ * │ │ (based on protocol) │ * │ │ [[MixedNode.inner.edgeI]] │ * │ │ ↓ │ * ↓ │ │ │ * (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │ * [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │ * │ │ ↑ │ │ │ * │ │ │ [[OutwardNode.doParams]] │ │ * │ │ │ (from the other node) │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * │ │ │ └────────┬──────────────┤ │ * │ │ │ │ │ │ * │ │ │ │ (based on protocol) │ * │ │ │ │ [[MixedNode.inner.edgeI]] │ * │ │ │ │ │ │ * │ │ (from the other node) │ ↓ │ * │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │ * │ ↑ ↑ │ │ ↓ │ * │ │ │ │ │ [[MixedNode.in]] │ * │ │ │ │ ↓ ↑ │ * │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │ * ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │ * │ │ │ [[MixedNode.bundleOut]]─┐ │ │ * │ │ │ ↑ ↓ │ │ * │ │ │ │ [[MixedNode.out]] │ │ * │ ↓ ↓ │ ↑ │ │ * │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │ * │ │ (from the other node) ↑ │ │ * │ │ │ │ │ │ * │ │ │ [[MixedNode.outer.edgeO]] │ │ * │ │ │ (based on protocol) │ │ * │ │ │ │ │ │ * │ │ │ ┌────────────────────────────────────────┤ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * (immobilize after elaboration)│ ↓ │ │ │ │ * [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │ * ↑ (inward port from [[OutwardNode]]) │ │ │ │ * │ ┌─────────────────────────────────────────┤ │ │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * [[OutwardNode.accPO]] │ ↓ │ │ │ * (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │ * │ ↑ │ │ * │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │ * └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘ * }}} */ abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data]( val inner: InwardNodeImp[DI, UI, EI, BI], val outer: OutwardNodeImp[DO, UO, EO, BO] )( implicit valName: ValName) extends BaseNode with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO] with InwardNode[DI, UI, BI] with OutwardNode[DO, UO, BO] { // Generate a [[NodeHandle]] with inward and outward node are both this node. val inward = this val outward = this /** Debug info of nodes binding. 
*/ def bindingInfo: String = s"""$iBindingInfo |$oBindingInfo |""".stripMargin /** Debug info of ports connecting. */ def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}] |${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}] |""".stripMargin /** Debug info of parameters propagations. */ def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}] |${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}] |${diParams.size} downstream inward parameters: [${diParams.mkString(",")}] |${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}] |""".stripMargin /** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and * [[MixedNode.iPortMapping]]. * * Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward * stars and outward stars. * * This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type * of node. * * @param iKnown * Number of known-size ([[BIND_ONCE]]) input bindings. * @param oKnown * Number of known-size ([[BIND_ONCE]]) output bindings. * @param iStar * Number of unknown size ([[BIND_STAR]]) input bindings. * @param oStar * Number of unknown size ([[BIND_STAR]]) output bindings. * @return * A Tuple of the resolved number of input and output connections. */ protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) /** Function to generate downward-flowing outward params from the downward-flowing input params and the current output * ports. * * @param n * The size of the output sequence to generate. * @param p * Sequence of downward-flowing input parameters of this node. * @return * A `n`-sized sequence of downward-flowing output edge parameters. */ protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]]. * * @param n * Size of the output sequence. * @param p * Upward-flowing output edge parameters. * @return * A n-sized sequence of upward-flowing input edge parameters. */ protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] /** @return * The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with * [[BIND_STAR]]. */ protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR) /** @return * The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of * output bindings bound with [[BIND_STAR]]. */ protected[diplomacy] lazy val sourceCard: Int = iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR) /** @return list of nodes involved in flex bindings with this node. */ protected[diplomacy] lazy val flexes: Seq[BaseNode] = oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2) /** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin * greedily taking up the remaining connections. * * @return * A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return * value is not relevant. 
*/ protected[diplomacy] lazy val flexOffset: Int = { /** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex * operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a * connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of * each node in the current set and decide whether they should be added to the set or not. * * @return * the mapping of [[BaseNode]] indexed by their serial numbers. */ def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = { if (visited.contains(v.serial) || !v.flexibleArityDirection) { visited } else { v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum)) } } /** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node. * * @example * {{{ * a :*=* b :*=* c * d :*=* b * e :*=* f * }}} * * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)` */ val flexSet = DFS(this, Map()).values /** The total number of :*= operators where we're on the left. */ val allSink = flexSet.map(_.sinkCard).sum /** The total number of :=* operators used when we're on the right. */ val allSource = flexSet.map(_.sourceCard).sum require( allSink == 0 || allSource == 0, s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction." ) allSink - allSource } /** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */ protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = { if (flexibleArityDirection) flexOffset else if (n.flexibleArityDirection) n.flexOffset else 0 } /** For a node which is connected between two nodes, select the one that will influence the direction of the flex * resolution. */ protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = { val dir = edgeArityDirection(n) if (dir < 0) l else if (dir > 0) r else 1 } /** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */ private var starCycleGuard = false /** Resolve all the star operators into concrete indicies. As connections are being made, some may be "star" * connections which need to be resolved. In some way to determine how many actual edges they correspond to. We also * need to build up the ranges of edges which correspond to each binding operator, so that We can apply the correct * edge parameters and later build up correct bundle connections. * * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding * operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort * (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= * bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N` */ protected[diplomacy] lazy val ( oPortMapping: Seq[(Int, Int)], iPortMapping: Seq[(Int, Int)], oStar: Int, iStar: Int ) = { try { if (starCycleGuard) throw StarCycleException() starCycleGuard = true // For a given node N... 
// Number of foo :=* N // + Number of bar :=* foo :*=* N val oStars = oBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0) } // Number of N :*= foo // + Number of N :*=* foo :*= bar val iStars = iBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0) } // 1 for foo := N // + bar.iStar for bar :*= foo :*=* N // + foo.iStar for foo :*= N // + 0 for foo :=* N val oKnown = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, 0, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => 0 } }.sum // 1 for N := foo // + bar.oStar for N :*=* foo :=* bar // + foo.oStar for N :=* foo // + 0 for N :*= foo val iKnown = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, 0) case BIND_QUERY => n.oStar case BIND_STAR => 0 } }.sum // Resolve star depends on the node subclass to implement the algorithm for this. val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars) // Cumulative list of resolved outward binding range starting points val oSum = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => oStar } }.scanLeft(0)(_ + _) // Cumulative list of resolved inward binding range starting points val iSum = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar) case BIND_QUERY => n.oStar case BIND_STAR => iStar } }.scanLeft(0)(_ + _) // Create ranges for each binding based on the running sums and return // those along with resolved values for the star operations. (oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar) } catch { case c: StarCycleException => throw c.copy(loop = context +: c.loop) } } /** Sequence of inward ports. * * This should be called after all star bindings are resolved. * * Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. * `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this * connection was made in the source code. */ protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oBindings.flatMap { case (i, n, _, p, s) => // for each binding operator in this node, look at what it connects to val (start, end) = n.iPortMapping(i) (start until end).map { j => (j, n, p, s) } } /** Sequence of outward ports. * * This should be called after all star bindings are resolved. * * `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of * outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection * was made in the source code. */ protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iBindings.flatMap { case (i, n, _, p, s) => // query this port index range of this node in the other side of node. 
val (start, end) = n.oPortMapping(i) (start until end).map { j => (j, n, p, s) } } // Ephemeral nodes ( which have non-None iForward/oForward) have in_degree = out_degree // Thus, there must exist an Eulerian path and the below algorithms terminate @scala.annotation.tailrec private def oTrace( tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) ): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.iForward(i) match { case None => (i, n, p, s) case Some((j, m)) => oTrace((j, m, p, s)) } } @scala.annotation.tailrec private def iTrace( tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) ): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.oForward(i) match { case None => (i, n, p, s) case Some((j, m)) => iTrace((j, m, p, s)) } } /** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - Numeric index of this binding in the [[InwardNode]] on the other end. * - [[InwardNode]] on the other end of this binding. * - A view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace) /** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - numeric index of this binding in [[OutwardNode]] on the other end. * - [[OutwardNode]] on the other end of this binding. * - a view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace) private var oParamsCycleGuard = false protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) } protected[diplomacy] lazy val doParams: Seq[DO] = { try { if (oParamsCycleGuard) throw DownwardCycleException() oParamsCycleGuard = true val o = mapParamsD(oPorts.size, diParams) require( o.size == oPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of outward ports should equal the number of produced outward parameters. |$context |$connectedPortsInfo |Downstreamed inward parameters: [${diParams.mkString(",")}] |Produced outward parameters: [${o.mkString(",")}] |""".stripMargin ) o.map(outer.mixO(_, this)) } catch { case c: DownwardCycleException => throw c.copy(loop = context +: c.loop) } } private var iParamsCycleGuard = false protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) } protected[diplomacy] lazy val uiParams: Seq[UI] = { try { if (iParamsCycleGuard) throw UpwardCycleException() iParamsCycleGuard = true val i = mapParamsU(iPorts.size, uoParams) require( i.size == iPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of inward ports should equal the number of produced inward parameters. |$context |$connectedPortsInfo |Upstreamed outward parameters: [${uoParams.mkString(",")}] |Produced inward parameters: [${i.mkString(",")}] |""".stripMargin ) i.map(inner.mixI(_, this)) } catch { case c: UpwardCycleException => throw c.copy(loop = context +: c.loop) } } /** Outward edge parameters. 
*/ protected[diplomacy] lazy val edgesOut: Seq[EO] = (oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) } /** Inward edge parameters. */ protected[diplomacy] lazy val edgesIn: Seq[EI] = (iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) } /** A tuple of the input edge parameters and output edge parameters for the edges bound to this node. * * If you need to access to the edges of a foreign Node, use this method (in/out create bundles). */ lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut) /** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */ protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e => val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } /** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */ protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e => val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(serial, i), sink = HalfEdge(n.serial, j), flipped = false, name = wirePrefix + "out", dataOpt = None ) } private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(n.serial, j), sink = HalfEdge(serial, i), flipped = true, name = wirePrefix + "in", dataOpt = None ) } /** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */ protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleOut(i))) } /** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */ protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleIn(i))) } private[diplomacy] var instantiated = false /** Gather Bundle and edge parameters of outward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def out: Seq[(BO, EO)] = { require( instantiated, s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleOut.zip(edgesOut) } /** Gather Bundle and edge parameters of inward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def in: Seq[(BI, EI)] = { require( instantiated, s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleIn.zip(edgesIn) } /** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires, * instantiate monitors on all input ports if appropriate, and return all the dangles of this node. 
*/ protected[diplomacy] def instantiate(): Seq[Dangle] = { instantiated = true if (!circuitIdentity) { (iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) } } danglesOut ++ danglesIn } protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn /** Connects the outward part of a node with the inward part of this node. */ protected[diplomacy] def bind( h: OutwardNode[DI, UI, BI], binding: NodeBinding )( implicit p: Parameters, sourceInfo: SourceInfo ): Unit = { val x = this // x := y val y = h sourceLine(sourceInfo, " at ", "") val i = x.iPushed val o = y.oPushed y.oPush( i, x, binding match { case BIND_ONCE => BIND_ONCE case BIND_FLEX => BIND_FLEX case BIND_STAR => BIND_QUERY case BIND_QUERY => BIND_STAR } ) x.iPush(o, y, binding) } /* Metadata for printing the node graph. */ def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) => val re = inner.render(e) (n, re.copy(flipped = re.flipped != p(RenderFlipped))) } /** Metadata for printing the node graph */ def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) } } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? 
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x 
match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! 
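// (Added note, not in the original file:) inFlight returns the current and next-cycle counts of
// outstanding transactions on this edge: the counter increments on the first beat of every request
// and decrements on the last beat of every response, across all five channels when B/C/E exist.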
def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare 
c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, 
mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := 
DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: 
Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, 
data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } } File Arbiter.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ object TLArbiter { // (valids, select) => readys type Policy = (Integer, UInt, Bool) => UInt val lowestIndexFirst: Policy = (width, valids, select) => ~(leftOR(valids) << 1)(width-1, 0) val highestIndexFirst: Policy = (width, valids, select) => ~((rightOR(valids) >> 1).pad(width)) val roundRobin: Policy = (width, valids, select) => if (width == 1) 1.U(1.W) else { val valid = valids(width-1, 0) assert (valid === valids) val mask = RegInit(((BigInt(1) << width)-1).U(width-1,0)) val filter = Cat(valid & ~mask, valid) val unready = (rightOR(filter, width*2, width) >> 1) | (mask << width) val readys = ~((unready >> width) & unready(width-1, 0)) when (select && valid.orR) { mask := leftOR(readys & valid, width) } readys(width-1, 0) } def lowestFromSeq[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: Seq[DecoupledIO[T]]): Unit = { apply(lowestIndexFirst)(sink, sources.map(s => (edge.numBeats1(s.bits), s)):_*) } def lowest[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: DecoupledIO[T]*): Unit = { apply(lowestIndexFirst)(sink, sources.toList.map(s => (edge.numBeats1(s.bits), s)):_*) } def highest[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: DecoupledIO[T]*): Unit = { apply(highestIndexFirst)(sink, sources.toList.map(s => (edge.numBeats1(s.bits), s)):_*) } def robin[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: DecoupledIO[T]*): Unit = { apply(roundRobin)(sink, sources.toList.map(s => (edge.numBeats1(s.bits), s)):_*) } def apply[T <: Data](policy: Policy)(sink: DecoupledIO[T], sources: (UInt, DecoupledIO[T])*): Unit = { if (sources.isEmpty) { sink.bits := DontCare } else if (sources.size == 1) { sink :<>= sources.head._2 } else { val pairs = sources.toList val beatsIn = pairs.map(_._1) val sourcesIn = pairs.map(_._2) // The number of beats which remain to be sent val beatsLeft = RegInit(0.U) val idle = beatsLeft === 0.U val latch = idle && sink.ready // winner (if any) claims sink // Who wants access to the sink? val valids = sourcesIn.map(_.valid) // Arbitrate amongst the requests val readys = VecInit(policy(valids.size, Cat(valids.reverse), latch).asBools) // Which request wins arbitration? 
val winner = VecInit((readys zip valids) map { case (r,v) => r&&v }) // Confirm the policy works properly require (readys.size == valids.size) // Never two winners val prefixOR = winner.scanLeft(false.B)(_||_).init assert((prefixOR zip winner) map { case (p,w) => !p || !w } reduce {_ && _}) // If there was any request, there is a winner assert (!valids.reduce(_||_) || winner.reduce(_||_)) // Track remaining beats val maskedBeats = (winner zip beatsIn) map { case (w,b) => Mux(w, b, 0.U) } val initBeats = maskedBeats.reduce(_ | _) // no winner => 0 beats beatsLeft := Mux(latch, initBeats, beatsLeft - sink.fire) // The one-hot source granted access in the previous cycle val state = RegInit(VecInit(Seq.fill(sources.size)(false.B))) val muxState = Mux(idle, winner, state) state := muxState val allowed = Mux(idle, readys, state) (sourcesIn zip allowed) foreach { case (s, r) => s.ready := sink.ready && r } sink.valid := Mux(idle, valids.reduce(_||_), Mux1H(state, valids)) sink.bits :<= Mux1H(muxState, sourcesIn.map(_.bits)) } } } // Synthesizable unit tests import freechips.rocketchip.unittest._ abstract class DecoupledArbiterTest( policy: TLArbiter.Policy, txns: Int, timeout: Int, val numSources: Int, beatsLeftFromIdx: Int => UInt) (implicit p: Parameters) extends UnitTest(timeout) { val sources = Wire(Vec(numSources, DecoupledIO(UInt(log2Ceil(numSources).W)))) dontTouch(sources.suggestName("sources")) val sink = Wire(DecoupledIO(UInt(log2Ceil(numSources).W))) dontTouch(sink.suggestName("sink")) val count = RegInit(0.U(log2Ceil(txns).W)) val lfsr = LFSR(16, true.B) sources.zipWithIndex.map { case (z, i) => z.bits := i.U } TLArbiter(policy)(sink, sources.zipWithIndex.map { case (z, i) => (beatsLeftFromIdx(i), z) }:_*) count := count + 1.U io.finished := count >= txns.U } /** This tests that when a specific pattern of source valids are driven, * a new index from amongst that pattern is always selected, * unless one of those sources takes multiple beats, * in which case the same index should be selected until the arbiter goes idle. */ class TLDecoupledArbiterRobinTest(txns: Int = 128, timeout: Int = 500000, print: Boolean = false) (implicit p: Parameters) extends DecoupledArbiterTest(TLArbiter.roundRobin, txns, timeout, 6, i => i.U) { val lastWinner = RegInit((numSources+1).U) val beatsLeft = RegInit(0.U(log2Ceil(numSources).W)) val first = lastWinner > numSources.U val valid = lfsr(0) val ready = lfsr(15) sink.ready := ready sources.zipWithIndex.map { // pattern: every even-indexed valid is driven the same random way case (s, i) => s.valid := (if (i % 2 == 1) false.B else valid) } when (sink.fire) { if (print) { printf("TestRobin: %d\n", sink.bits) } when (beatsLeft === 0.U) { assert(lastWinner =/= sink.bits, "Round robin did not pick a new idx despite one being valid.") lastWinner := sink.bits beatsLeft := sink.bits } .otherwise { assert(lastWinner === sink.bits, "Round robin did not pick the same index over multiple beats") beatsLeft := beatsLeft - 1.U } } if (print) { when (!sink.fire) { printf("TestRobin: idle (%d %d)\n", valid, ready) } } } /** This tests that the lowest index is always selected across random single cycle transactions. 
*/ class TLDecoupledArbiterLowestTest(txns: Int = 128, timeout: Int = 500000)(implicit p: Parameters) extends DecoupledArbiterTest(TLArbiter.lowestIndexFirst, txns, timeout, 15, _ => 0.U) { def assertLowest(id: Int): Unit = { when (sources(id).valid) { assert((numSources-1 until id by -1).map(!sources(_).fire).foldLeft(true.B)(_&&_), s"$id was valid but a higher valid source was granted ready.") } } sources.zipWithIndex.map { case (s, i) => s.valid := lfsr(i) } sink.ready := lfsr(15) when (sink.fire) { (0 until numSources).foreach(assertLowest(_)) } } /** This tests that the highest index is always selected across random single cycle transactions. */ class TLDecoupledArbiterHighestTest(txns: Int = 128, timeout: Int = 500000)(implicit p: Parameters) extends DecoupledArbiterTest(TLArbiter.highestIndexFirst, txns, timeout, 15, _ => 0.U) { def assertHighest(id: Int): Unit = { when (sources(id).valid) { assert((0 until id).map(!sources(_).fire).foldLeft(true.B)(_&&_), s"$id was valid but a lower valid source was granted ready.") } } sources.zipWithIndex.map { case (s, i) => s.valid := lfsr(i) } sink.ready := lfsr(15) when (sink.fire) { (0 until numSources).foreach(assertHighest(_)) } } File Xbar.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.diplomacy.{AddressDecoder, AddressSet, RegionType, IdRange, TriStateValue} import freechips.rocketchip.util.BundleField // Trades off slave port proximity against routing resource cost object ForceFanout { def apply[T]( a: TriStateValue = TriStateValue.unset, b: TriStateValue = TriStateValue.unset, c: TriStateValue = TriStateValue.unset, d: TriStateValue = TriStateValue.unset, e: TriStateValue = TriStateValue.unset)(body: Parameters => T)(implicit p: Parameters) = { body(p.alterPartial { case ForceFanoutKey => p(ForceFanoutKey) match { case ForceFanoutParams(pa, pb, pc, pd, pe) => ForceFanoutParams(a.update(pa), b.update(pb), c.update(pc), d.update(pd), e.update(pe)) } }) } } private case class ForceFanoutParams(a: Boolean, b: Boolean, c: Boolean, d: Boolean, e: Boolean) private case object ForceFanoutKey extends Field(ForceFanoutParams(false, false, false, false, false)) class TLXbar(policy: TLArbiter.Policy = TLArbiter.roundRobin, nameSuffix: Option[String] = None)(implicit p: Parameters) extends LazyModule { val node = new TLNexusNode( clientFn = { seq => seq(0).v1copy( echoFields = BundleField.union(seq.flatMap(_.echoFields)), requestFields = BundleField.union(seq.flatMap(_.requestFields)), responseKeys = seq.flatMap(_.responseKeys).distinct, minLatency = seq.map(_.minLatency).min, clients = (TLXbar.mapInputIds(seq) zip seq) flatMap { case (range, port) => port.clients map { client => client.v1copy( sourceId = client.sourceId.shift(range.start) )} } ) }, managerFn = { seq => val fifoIdFactory = TLXbar.relabeler() seq(0).v1copy( responseFields = BundleField.union(seq.flatMap(_.responseFields)), requestKeys = seq.flatMap(_.requestKeys).distinct, minLatency = seq.map(_.minLatency).min, endSinkId = TLXbar.mapOutputIds(seq).map(_.end).max, managers = seq.flatMap { port => require (port.beatBytes == seq(0).beatBytes, s"Xbar ($name with parent $parent) data widths don't match: ${port.managers.map(_.name)} has ${port.beatBytes}B vs ${seq(0).managers.map(_.name)} has ${seq(0).beatBytes}B") val fifoIdMapper = fifoIdFactory() port.managers map { manager => manager.v1copy( 
fifoId = manager.fifoId.map(fifoIdMapper(_)) )} } ) } ){ override def circuitIdentity = outputs.size == 1 && inputs.size == 1 } lazy val module = new Impl class Impl extends LazyModuleImp(this) { if ((node.in.size * node.out.size) > (8*32)) { println (s"!!! WARNING !!!") println (s" Your TLXbar ($name with parent $parent) is very large, with ${node.in.size} Masters and ${node.out.size} Slaves.") println (s"!!! WARNING !!!") } val wide_bundle = TLBundleParameters.union((node.in ++ node.out).map(_._2.bundle)) override def desiredName = (Seq("TLXbar") ++ nameSuffix ++ Seq(s"i${node.in.size}_o${node.out.size}_${wide_bundle.shortName}")).mkString("_") TLXbar.circuit(policy, node.in, node.out) } } object TLXbar { def mapInputIds(ports: Seq[TLMasterPortParameters]) = assignRanges(ports.map(_.endSourceId)) def mapOutputIds(ports: Seq[TLSlavePortParameters]) = assignRanges(ports.map(_.endSinkId)) def assignRanges(sizes: Seq[Int]) = { val pow2Sizes = sizes.map { z => if (z == 0) 0 else 1 << log2Ceil(z) } val tuples = pow2Sizes.zipWithIndex.sortBy(_._1) // record old index, then sort by increasing size val starts = tuples.scanRight(0)(_._1 + _).tail // suffix-sum of the sizes = the start positions val ranges = (tuples zip starts) map { case ((sz, i), st) => (if (sz == 0) IdRange(0, 0) else IdRange(st, st + sz), i) } ranges.sortBy(_._2).map(_._1) // Restore orignal order } def relabeler() = { var idFactory = 0 () => { val fifoMap = scala.collection.mutable.HashMap.empty[Int, Int] (x: Int) => { if (fifoMap.contains(x)) fifoMap(x) else { val out = idFactory idFactory = idFactory + 1 fifoMap += (x -> out) out } } } } def circuit(policy: TLArbiter.Policy, seqIn: Seq[(TLBundle, TLEdge)], seqOut: Seq[(TLBundle, TLEdge)]) { val (io_in, edgesIn) = seqIn.unzip val (io_out, edgesOut) = seqOut.unzip // Not every master need connect to every slave on every channel; determine which connections are necessary val reachableIO = edgesIn.map { cp => edgesOut.map { mp => cp.client.clients.exists { c => mp.manager.managers.exists { m => c.visibility.exists { ca => m.address.exists { ma => ca.overlaps(ma)}}}} }.toVector}.toVector val probeIO = (edgesIn zip reachableIO).map { case (cp, reachableO) => (edgesOut zip reachableO).map { case (mp, reachable) => reachable && cp.client.anySupportProbe && mp.manager.managers.exists(_.regionType >= RegionType.TRACKED) }.toVector}.toVector val releaseIO = (edgesIn zip reachableIO).map { case (cp, reachableO) => (edgesOut zip reachableO).map { case (mp, reachable) => reachable && cp.client.anySupportProbe && mp.manager.anySupportAcquireB }.toVector}.toVector val connectAIO = reachableIO val connectBIO = probeIO val connectCIO = releaseIO val connectDIO = reachableIO val connectEIO = releaseIO def transpose[T](x: Seq[Seq[T]]) = if (x.isEmpty) Nil else Vector.tabulate(x(0).size) { i => Vector.tabulate(x.size) { j => x(j)(i) } } val connectAOI = transpose(connectAIO) val connectBOI = transpose(connectBIO) val connectCOI = transpose(connectCIO) val connectDOI = transpose(connectDIO) val connectEOI = transpose(connectEIO) // Grab the port ID mapping val inputIdRanges = TLXbar.mapInputIds(edgesIn.map(_.client)) val outputIdRanges = TLXbar.mapOutputIds(edgesOut.map(_.manager)) // We need an intermediate size of bundle with the widest possible identifiers val wide_bundle = TLBundleParameters.union(io_in.map(_.params) ++ io_out.map(_.params)) // Handle size = 1 gracefully (Chisel3 empty range is broken) def trim(id: UInt, size: Int): UInt = if (size <= 1) 0.U else id(log2Ceil(size)-1, 0) // 
Transform input bundle sources (sinks use global namespace on both sides) val in = Wire(Vec(io_in.size, TLBundle(wide_bundle))) for (i <- 0 until in.size) { val r = inputIdRanges(i) if (connectAIO(i).exists(x=>x)) { in(i).a.bits.user := DontCare in(i).a.squeezeAll.waiveAll :<>= io_in(i).a.squeezeAll.waiveAll in(i).a.bits.source := io_in(i).a.bits.source | r.start.U } else { in(i).a := DontCare io_in(i).a := DontCare in(i).a.valid := false.B io_in(i).a.ready := true.B } if (connectBIO(i).exists(x=>x)) { io_in(i).b.squeezeAll :<>= in(i).b.squeezeAll io_in(i).b.bits.source := trim(in(i).b.bits.source, r.size) } else { in(i).b := DontCare io_in(i).b := DontCare in(i).b.ready := true.B io_in(i).b.valid := false.B } if (connectCIO(i).exists(x=>x)) { in(i).c.bits.user := DontCare in(i).c.squeezeAll.waiveAll :<>= io_in(i).c.squeezeAll.waiveAll in(i).c.bits.source := io_in(i).c.bits.source | r.start.U } else { in(i).c := DontCare io_in(i).c := DontCare in(i).c.valid := false.B io_in(i).c.ready := true.B } if (connectDIO(i).exists(x=>x)) { io_in(i).d.squeezeAll.waiveAll :<>= in(i).d.squeezeAll.waiveAll io_in(i).d.bits.source := trim(in(i).d.bits.source, r.size) } else { in(i).d := DontCare io_in(i).d := DontCare in(i).d.ready := true.B io_in(i).d.valid := false.B } if (connectEIO(i).exists(x=>x)) { in(i).e.squeezeAll :<>= io_in(i).e.squeezeAll } else { in(i).e := DontCare io_in(i).e := DontCare in(i).e.valid := false.B io_in(i).e.ready := true.B } } // Transform output bundle sinks (sources use global namespace on both sides) val out = Wire(Vec(io_out.size, TLBundle(wide_bundle))) for (o <- 0 until out.size) { val r = outputIdRanges(o) if (connectAOI(o).exists(x=>x)) { out(o).a.bits.user := DontCare io_out(o).a.squeezeAll.waiveAll :<>= out(o).a.squeezeAll.waiveAll } else { out(o).a := DontCare io_out(o).a := DontCare out(o).a.ready := true.B io_out(o).a.valid := false.B } if (connectBOI(o).exists(x=>x)) { out(o).b.squeezeAll :<>= io_out(o).b.squeezeAll } else { out(o).b := DontCare io_out(o).b := DontCare out(o).b.valid := false.B io_out(o).b.ready := true.B } if (connectCOI(o).exists(x=>x)) { out(o).c.bits.user := DontCare io_out(o).c.squeezeAll.waiveAll :<>= out(o).c.squeezeAll.waiveAll } else { out(o).c := DontCare io_out(o).c := DontCare out(o).c.ready := true.B io_out(o).c.valid := false.B } if (connectDOI(o).exists(x=>x)) { out(o).d.squeezeAll :<>= io_out(o).d.squeezeAll out(o).d.bits.sink := io_out(o).d.bits.sink | r.start.U } else { out(o).d := DontCare io_out(o).d := DontCare out(o).d.valid := false.B io_out(o).d.ready := true.B } if (connectEOI(o).exists(x=>x)) { io_out(o).e.squeezeAll :<>= out(o).e.squeezeAll io_out(o).e.bits.sink := trim(out(o).e.bits.sink, r.size) } else { out(o).e := DontCare io_out(o).e := DontCare out(o).e.ready := true.B io_out(o).e.valid := false.B } } // Filter a list to only those elements selected def filter[T](data: Seq[T], mask: Seq[Boolean]) = (data zip mask).filter(_._2).map(_._1) // Based on input=>output connectivity, create per-input minimal address decode circuits val requiredAC = (connectAIO ++ connectCIO).distinct val outputPortFns: Map[Vector[Boolean], Seq[UInt => Bool]] = requiredAC.map { connectO => val port_addrs = edgesOut.map(_.manager.managers.flatMap(_.address)) val routingMask = AddressDecoder(filter(port_addrs, connectO)) val route_addrs = port_addrs.map(seq => AddressSet.unify(seq.map(_.widen(~routingMask)).distinct)) // Print the address mapping if (false) { println("Xbar mapping:") route_addrs.foreach { p => print(" ") p.foreach { a => 
print(s" ${a}") } println("") } println("--") } (connectO, route_addrs.map(seq => (addr: UInt) => seq.map(_.contains(addr)).reduce(_ || _))) }.toMap // Print the ID mapping if (false) { println(s"XBar mapping:") (edgesIn zip inputIdRanges).zipWithIndex.foreach { case ((edge, id), i) => println(s"\t$i assigned ${id} for ${edge.client.clients.map(_.name).mkString(", ")}") } println("") } val addressA = (in zip edgesIn) map { case (i, e) => e.address(i.a.bits) } val addressC = (in zip edgesIn) map { case (i, e) => e.address(i.c.bits) } def unique(x: Vector[Boolean]): Bool = (x.filter(x=>x).size <= 1).B val requestAIO = (connectAIO zip addressA) map { case (c, i) => outputPortFns(c).map { o => unique(c) || o(i) } } val requestCIO = (connectCIO zip addressC) map { case (c, i) => outputPortFns(c).map { o => unique(c) || o(i) } } val requestBOI = out.map { o => inputIdRanges.map { i => i.contains(o.b.bits.source) } } val requestDOI = out.map { o => inputIdRanges.map { i => i.contains(o.d.bits.source) } } val requestEIO = in.map { i => outputIdRanges.map { o => o.contains(i.e.bits.sink) } } val beatsAI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.a.bits) } val beatsBO = (out zip edgesOut) map { case (o, e) => e.numBeats1(o.b.bits) } val beatsCI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.c.bits) } val beatsDO = (out zip edgesOut) map { case (o, e) => e.numBeats1(o.d.bits) } val beatsEI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.e.bits) } // Fanout the input sources to the output sinks val portsAOI = transpose((in zip requestAIO) map { case (i, r) => TLXbar.fanout(i.a, r, edgesOut.map(_.params(ForceFanoutKey).a)) }) val portsBIO = transpose((out zip requestBOI) map { case (o, r) => TLXbar.fanout(o.b, r, edgesIn .map(_.params(ForceFanoutKey).b)) }) val portsCOI = transpose((in zip requestCIO) map { case (i, r) => TLXbar.fanout(i.c, r, edgesOut.map(_.params(ForceFanoutKey).c)) }) val portsDIO = transpose((out zip requestDOI) map { case (o, r) => TLXbar.fanout(o.d, r, edgesIn .map(_.params(ForceFanoutKey).d)) }) val portsEOI = transpose((in zip requestEIO) map { case (i, r) => TLXbar.fanout(i.e, r, edgesOut.map(_.params(ForceFanoutKey).e)) }) // Arbitrate amongst the sources for (o <- 0 until out.size) { TLArbiter(policy)(out(o).a, filter(beatsAI zip portsAOI(o), connectAOI(o)):_*) TLArbiter(policy)(out(o).c, filter(beatsCI zip portsCOI(o), connectCOI(o)):_*) TLArbiter(policy)(out(o).e, filter(beatsEI zip portsEOI(o), connectEOI(o)):_*) filter(portsAOI(o), connectAOI(o).map(!_)) foreach { r => r.ready := false.B } filter(portsCOI(o), connectCOI(o).map(!_)) foreach { r => r.ready := false.B } filter(portsEOI(o), connectEOI(o).map(!_)) foreach { r => r.ready := false.B } } for (i <- 0 until in.size) { TLArbiter(policy)(in(i).b, filter(beatsBO zip portsBIO(i), connectBIO(i)):_*) TLArbiter(policy)(in(i).d, filter(beatsDO zip portsDIO(i), connectDIO(i)):_*) filter(portsBIO(i), connectBIO(i).map(!_)) foreach { r => r.ready := false.B } filter(portsDIO(i), connectDIO(i).map(!_)) foreach { r => r.ready := false.B } } } def apply(policy: TLArbiter.Policy = TLArbiter.roundRobin, nameSuffix: Option[String] = None)(implicit p: Parameters): TLNode = { val xbar = LazyModule(new TLXbar(policy, nameSuffix)) xbar.node } // Replicate an input port to each output port def fanout[T <: TLChannel](input: DecoupledIO[T], select: Seq[Bool], force: Seq[Boolean] = Nil): Seq[DecoupledIO[T]] = { val filtered = Wire(Vec(select.size, chiselTypeOf(input))) for (i <- 0 until select.size) { 
filtered(i).bits := (if (force.lift(i).getOrElse(false)) IdentityModule(input.bits) else input.bits) filtered(i).valid := input.valid && (select(i) || (select.size == 1).B) } input.ready := Mux1H(select, filtered.map(_.ready)) filtered } } // Synthesizable unit tests import freechips.rocketchip.unittest._ class TLRAMXbar(nManagers: Int, txns: Int)(implicit p: Parameters) extends LazyModule { val fuzz = LazyModule(new TLFuzzer(txns)) val model = LazyModule(new TLRAMModel("Xbar")) val xbar = LazyModule(new TLXbar) xbar.node := TLDelayer(0.1) := model.node := fuzz.node (0 until nManagers) foreach { n => val ram = LazyModule(new TLRAM(AddressSet(0x0+0x400*n, 0x3ff))) ram.node := TLFragmenter(4, 256) := TLDelayer(0.1) := xbar.node } lazy val module = new Impl class Impl extends LazyModuleImp(this) with UnitTestModule { io.finished := fuzz.module.io.finished } } class TLRAMXbarTest(nManagers: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) { val dut = Module(LazyModule(new TLRAMXbar(nManagers,txns)).module) dut.io.start := io.start io.finished := dut.io.finished } class TLMulticlientXbar(nManagers: Int, nClients: Int, txns: Int)(implicit p: Parameters) extends LazyModule { val xbar = LazyModule(new TLXbar) val fuzzers = (0 until nClients) map { n => val fuzz = LazyModule(new TLFuzzer(txns)) xbar.node := TLDelayer(0.1) := fuzz.node fuzz } (0 until nManagers) foreach { n => val ram = LazyModule(new TLRAM(AddressSet(0x0+0x400*n, 0x3ff))) ram.node := TLFragmenter(4, 256) := TLDelayer(0.1) := xbar.node } lazy val module = new Impl class Impl extends LazyModuleImp(this) with UnitTestModule { io.finished := fuzzers.last.module.io.finished } } class TLMulticlientXbarTest(nManagers: Int, nClients: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) { val dut = Module(LazyModule(new TLMulticlientXbar(nManagers, nClients, txns)).module) dut.io.start := io.start io.finished := dut.io.finished }
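A minimal sketch, added for illustration, of the source-ID carving that TLXbar.assignRanges performs above, written in plain Scala with no Chisel dependencies: per-port ID counts are rounded up to powers of two, sorted, suffix-summed into aligned start offsets, and then restored to the original port order. The names below (XbarIdMappingSketch, SimpleRange, demoAssignRanges) are illustrative only and are not part of the library.

// Illustrative only -- mirrors the arithmetic of TLXbar.assignRanges with plain integers.
object XbarIdMappingSketch extends App {
  case class SimpleRange(start: Int, end: Int) // end is exclusive, like diplomacy's IdRange
  def log2Ceil(x: Int): Int = if (x <= 1) 0 else 32 - Integer.numberOfLeadingZeros(x - 1)
  def demoAssignRanges(sizes: Seq[Int]): Seq[SimpleRange] = {
    val pow2Sizes = sizes.map { z => if (z == 0) 0 else 1 << log2Ceil(z) } // round up to powers of two
    val tuples = pow2Sizes.zipWithIndex.sortBy(_._1)                       // record old index, sort by size
    val starts = tuples.scanRight(0)(_._1 + _).tail                        // suffix sum = aligned start offsets
    val ranges = (tuples zip starts).map { case ((sz, i), st) =>
      (if (sz == 0) SimpleRange(0, 0) else SimpleRange(st, st + sz), i)
    }
    ranges.sortBy(_._2).map(_._1)                                          // restore original port order
  }
  // Three masters needing 3, 1 and 6 source ids are padded to 4, 1 and 8:
  // prints List(SimpleRange(8,12), SimpleRange(12,13), SimpleRange(0,8))
  println(demoAssignRanges(Seq(3, 1, 6)))
}

Inside the crossbar itself, an input source s on port i is then renamed to s | ranges(i).start on the way in and mapped back with trim(source, ranges(i).size) on the way out, as in the in(i)/io_in(i) wiring above.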
module TLXbar_pbus_out_i1_o3_a29d64s8k1z3u( // @[Xbar.scala:74:9] input clock, // @[Xbar.scala:74:9] input reset, // @[Xbar.scala:74:9] output auto_anon_in_a_ready, // @[LazyModuleImp.scala:107:25] input auto_anon_in_a_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_in_a_bits_param, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_in_a_bits_size, // @[LazyModuleImp.scala:107:25] input [7:0] auto_anon_in_a_bits_source, // @[LazyModuleImp.scala:107:25] input [28:0] auto_anon_in_a_bits_address, // @[LazyModuleImp.scala:107:25] input [7:0] auto_anon_in_a_bits_mask, // @[LazyModuleImp.scala:107:25] input [63:0] auto_anon_in_a_bits_data, // @[LazyModuleImp.scala:107:25] input auto_anon_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_anon_in_d_ready, // @[LazyModuleImp.scala:107:25] output auto_anon_in_d_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_in_d_bits_size, // @[LazyModuleImp.scala:107:25] output [7:0] auto_anon_in_d_bits_source, // @[LazyModuleImp.scala:107:25] output [63:0] auto_anon_in_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_anon_out_2_a_ready, // @[LazyModuleImp.scala:107:25] output auto_anon_out_2_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_2_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_2_a_bits_param, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_2_a_bits_size, // @[LazyModuleImp.scala:107:25] output [7:0] auto_anon_out_2_a_bits_source, // @[LazyModuleImp.scala:107:25] output [28:0] auto_anon_out_2_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_anon_out_2_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_anon_out_2_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_anon_out_2_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_anon_out_2_d_ready, // @[LazyModuleImp.scala:107:25] input auto_anon_out_2_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_out_2_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_out_2_d_bits_size, // @[LazyModuleImp.scala:107:25] input [7:0] auto_anon_out_2_d_bits_source, // @[LazyModuleImp.scala:107:25] input [63:0] auto_anon_out_2_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_anon_out_1_a_ready, // @[LazyModuleImp.scala:107:25] output auto_anon_out_1_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_1_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_1_a_bits_param, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_1_a_bits_size, // @[LazyModuleImp.scala:107:25] output [7:0] auto_anon_out_1_a_bits_source, // @[LazyModuleImp.scala:107:25] output [28:0] auto_anon_out_1_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_anon_out_1_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_anon_out_1_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_anon_out_1_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_anon_out_1_d_ready, // @[LazyModuleImp.scala:107:25] input auto_anon_out_1_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_out_1_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_out_1_d_bits_size, // @[LazyModuleImp.scala:107:25] input [7:0] auto_anon_out_1_d_bits_source, // @[LazyModuleImp.scala:107:25] input [63:0] auto_anon_out_1_d_bits_data, // 
@[LazyModuleImp.scala:107:25] input auto_anon_out_0_a_ready, // @[LazyModuleImp.scala:107:25] output auto_anon_out_0_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_0_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_0_a_bits_param, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_0_a_bits_size, // @[LazyModuleImp.scala:107:25] output [7:0] auto_anon_out_0_a_bits_source, // @[LazyModuleImp.scala:107:25] output [12:0] auto_anon_out_0_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_anon_out_0_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_anon_out_0_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_anon_out_0_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_anon_out_0_d_ready, // @[LazyModuleImp.scala:107:25] input auto_anon_out_0_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_out_0_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_out_0_d_bits_size, // @[LazyModuleImp.scala:107:25] input [7:0] auto_anon_out_0_d_bits_source, // @[LazyModuleImp.scala:107:25] input [63:0] auto_anon_out_0_d_bits_data // @[LazyModuleImp.scala:107:25] ); wire [7:0] in_0_d_bits_source; // @[Xbar.scala:159:18] wire [7:0] in_0_a_bits_source; // @[Xbar.scala:159:18] wire auto_anon_in_a_valid_0 = auto_anon_in_a_valid; // @[Xbar.scala:74:9] wire [2:0] auto_anon_in_a_bits_opcode_0 = auto_anon_in_a_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] auto_anon_in_a_bits_param_0 = auto_anon_in_a_bits_param; // @[Xbar.scala:74:9] wire [2:0] auto_anon_in_a_bits_size_0 = auto_anon_in_a_bits_size; // @[Xbar.scala:74:9] wire [7:0] auto_anon_in_a_bits_source_0 = auto_anon_in_a_bits_source; // @[Xbar.scala:74:9] wire [28:0] auto_anon_in_a_bits_address_0 = auto_anon_in_a_bits_address; // @[Xbar.scala:74:9] wire [7:0] auto_anon_in_a_bits_mask_0 = auto_anon_in_a_bits_mask; // @[Xbar.scala:74:9] wire [63:0] auto_anon_in_a_bits_data_0 = auto_anon_in_a_bits_data; // @[Xbar.scala:74:9] wire auto_anon_in_a_bits_corrupt_0 = auto_anon_in_a_bits_corrupt; // @[Xbar.scala:74:9] wire auto_anon_in_d_ready_0 = auto_anon_in_d_ready; // @[Xbar.scala:74:9] wire auto_anon_out_2_a_ready_0 = auto_anon_out_2_a_ready; // @[Xbar.scala:74:9] wire auto_anon_out_2_d_valid_0 = auto_anon_out_2_d_valid; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_2_d_bits_opcode_0 = auto_anon_out_2_d_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_2_d_bits_size_0 = auto_anon_out_2_d_bits_size; // @[Xbar.scala:74:9] wire [7:0] auto_anon_out_2_d_bits_source_0 = auto_anon_out_2_d_bits_source; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_2_d_bits_data_0 = auto_anon_out_2_d_bits_data; // @[Xbar.scala:74:9] wire auto_anon_out_1_a_ready_0 = auto_anon_out_1_a_ready; // @[Xbar.scala:74:9] wire auto_anon_out_1_d_valid_0 = auto_anon_out_1_d_valid; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_1_d_bits_opcode_0 = auto_anon_out_1_d_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_1_d_bits_size_0 = auto_anon_out_1_d_bits_size; // @[Xbar.scala:74:9] wire [7:0] auto_anon_out_1_d_bits_source_0 = auto_anon_out_1_d_bits_source; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_1_d_bits_data_0 = auto_anon_out_1_d_bits_data; // @[Xbar.scala:74:9] wire auto_anon_out_0_a_ready_0 = auto_anon_out_0_a_ready; // @[Xbar.scala:74:9] wire auto_anon_out_0_d_valid_0 = auto_anon_out_0_d_valid; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_0_d_bits_opcode_0 = auto_anon_out_0_d_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] 
auto_anon_out_0_d_bits_size_0 = auto_anon_out_0_d_bits_size; // @[Xbar.scala:74:9] wire [7:0] auto_anon_out_0_d_bits_source_0 = auto_anon_out_0_d_bits_source; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_0_d_bits_data_0 = auto_anon_out_0_d_bits_data; // @[Xbar.scala:74:9] wire _readys_T_2 = reset; // @[Arbiter.scala:22:12] wire auto_anon_in_d_bits_sink = 1'h0; // @[Xbar.scala:74:9] wire auto_anon_in_d_bits_denied = 1'h0; // @[Xbar.scala:74:9] wire auto_anon_in_d_bits_corrupt = 1'h0; // @[Xbar.scala:74:9] wire auto_anon_out_2_d_bits_sink = 1'h0; // @[Xbar.scala:74:9] wire auto_anon_out_2_d_bits_denied = 1'h0; // @[Xbar.scala:74:9] wire auto_anon_out_2_d_bits_corrupt = 1'h0; // @[Xbar.scala:74:9] wire auto_anon_out_1_d_bits_sink = 1'h0; // @[Xbar.scala:74:9] wire auto_anon_out_1_d_bits_denied = 1'h0; // @[Xbar.scala:74:9] wire auto_anon_out_1_d_bits_corrupt = 1'h0; // @[Xbar.scala:74:9] wire auto_anon_out_0_d_bits_sink = 1'h0; // @[Xbar.scala:74:9] wire auto_anon_out_0_d_bits_denied = 1'h0; // @[Xbar.scala:74:9] wire auto_anon_out_0_d_bits_corrupt = 1'h0; // @[Xbar.scala:74:9] wire anonIn_d_bits_sink = 1'h0; // @[MixedNode.scala:551:17] wire anonIn_d_bits_denied = 1'h0; // @[MixedNode.scala:551:17] wire anonIn_d_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17] wire anonOut_d_bits_sink = 1'h0; // @[MixedNode.scala:542:17] wire anonOut_d_bits_denied = 1'h0; // @[MixedNode.scala:542:17] wire anonOut_d_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17] wire x1_anonOut_d_bits_sink = 1'h0; // @[MixedNode.scala:542:17] wire x1_anonOut_d_bits_denied = 1'h0; // @[MixedNode.scala:542:17] wire x1_anonOut_d_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17] wire x1_anonOut_1_d_bits_sink = 1'h0; // @[MixedNode.scala:542:17] wire x1_anonOut_1_d_bits_denied = 1'h0; // @[MixedNode.scala:542:17] wire x1_anonOut_1_d_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17] wire in_0_d_bits_sink = 1'h0; // @[Xbar.scala:159:18] wire in_0_d_bits_denied = 1'h0; // @[Xbar.scala:159:18] wire in_0_d_bits_corrupt = 1'h0; // @[Xbar.scala:159:18] wire out_0_d_bits_sink = 1'h0; // @[Xbar.scala:216:19] wire out_0_d_bits_denied = 1'h0; // @[Xbar.scala:216:19] wire out_0_d_bits_corrupt = 1'h0; // @[Xbar.scala:216:19] wire out_1_d_bits_sink = 1'h0; // @[Xbar.scala:216:19] wire out_1_d_bits_denied = 1'h0; // @[Xbar.scala:216:19] wire out_1_d_bits_corrupt = 1'h0; // @[Xbar.scala:216:19] wire out_2_d_bits_sink = 1'h0; // @[Xbar.scala:216:19] wire out_2_d_bits_denied = 1'h0; // @[Xbar.scala:216:19] wire out_2_d_bits_corrupt = 1'h0; // @[Xbar.scala:216:19] wire _out_0_d_bits_sink_T = 1'h0; // @[Xbar.scala:251:53] wire _out_1_d_bits_sink_T = 1'h0; // @[Xbar.scala:251:53] wire _out_2_d_bits_sink_T = 1'h0; // @[Xbar.scala:251:53] wire _addressC_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _addressC_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _addressC_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _addressC_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _addressC_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _addressC_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _requestBOI_WIRE_ready = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_valid = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_1_ready = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_1_valid = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_T = 1'h0; // 
@[Parameters.scala:54:10] wire _requestBOI_WIRE_2_ready = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_2_valid = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_3_ready = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_3_valid = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_T_5 = 1'h0; // @[Parameters.scala:54:10] wire _requestBOI_WIRE_4_ready = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_4_valid = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_5_ready = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_5_valid = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_T_10 = 1'h0; // @[Parameters.scala:54:10] wire _requestDOI_T = 1'h0; // @[Parameters.scala:54:10] wire _requestDOI_T_5 = 1'h0; // @[Parameters.scala:54:10] wire _requestDOI_T_10 = 1'h0; // @[Parameters.scala:54:10] wire _requestEIO_WIRE_ready = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_valid = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_bits_sink = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_1_ready = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_1_valid = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_1_bits_sink = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_2_ready = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_2_valid = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_2_bits_sink = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_3_ready = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_3_valid = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_3_bits_sink = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_4_ready = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_4_valid = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_4_bits_sink = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_5_ready = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_5_valid = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_5_bits_sink = 1'h0; // @[Bundles.scala:267:61] wire _beatsBO_WIRE_ready = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_valid = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_1_ready = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_1_valid = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_opdata_T = 1'h0; // @[Edges.scala:97:37] wire _beatsBO_WIRE_2_ready = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_2_valid = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_3_ready = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_3_valid = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_opdata_T_1 = 1'h0; // @[Edges.scala:97:37] wire _beatsBO_WIRE_4_ready = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_4_valid = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_5_ready = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_5_valid = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_5_bits_corrupt = 1'h0; // 
@[Bundles.scala:264:61] wire _beatsBO_opdata_T_2 = 1'h0; // @[Edges.scala:97:37] wire _beatsCI_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _beatsCI_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _beatsCI_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _beatsCI_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _beatsCI_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _beatsCI_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire beatsCI_opdata = 1'h0; // @[Edges.scala:102:36] wire _beatsEI_WIRE_ready = 1'h0; // @[Bundles.scala:267:74] wire _beatsEI_WIRE_valid = 1'h0; // @[Bundles.scala:267:74] wire _beatsEI_WIRE_bits_sink = 1'h0; // @[Bundles.scala:267:74] wire _beatsEI_WIRE_1_ready = 1'h0; // @[Bundles.scala:267:61] wire _beatsEI_WIRE_1_valid = 1'h0; // @[Bundles.scala:267:61] wire _beatsEI_WIRE_1_bits_sink = 1'h0; // @[Bundles.scala:267:61] wire _portsBIO_WIRE_ready = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_valid = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_1_ready = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_1_valid = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire portsBIO_filtered_0_ready = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_0_valid = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire _portsBIO_filtered_0_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsBIO_WIRE_2_ready = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_2_valid = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_3_ready = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_3_valid = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire portsBIO_filtered_1_0_ready = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_1_0_valid = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_1_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire _portsBIO_filtered_0_valid_T_3 = 1'h0; // @[Xbar.scala:355:40] wire _portsBIO_WIRE_4_ready = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_4_valid = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_5_ready = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_5_valid = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire portsBIO_filtered_2_0_ready = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_2_0_valid = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_2_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire _portsBIO_filtered_0_valid_T_5 = 1'h0; // @[Xbar.scala:355:40] wire _portsCOI_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _portsCOI_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _portsCOI_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _portsCOI_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _portsCOI_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _portsCOI_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire portsCOI_filtered_0_ready = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_0_valid = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_1_ready = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_1_valid = 1'h0; // 
@[Xbar.scala:352:24] wire portsCOI_filtered_1_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_2_ready = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_2_valid = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_2_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire _portsCOI_filtered_0_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsCOI_filtered_1_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsCOI_filtered_2_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsCOI_T = 1'h0; // @[Mux.scala:30:73] wire _portsCOI_T_1 = 1'h0; // @[Mux.scala:30:73] wire _portsCOI_T_2 = 1'h0; // @[Mux.scala:30:73] wire _portsCOI_T_3 = 1'h0; // @[Mux.scala:30:73] wire _portsCOI_T_4 = 1'h0; // @[Mux.scala:30:73] wire _portsCOI_WIRE_2 = 1'h0; // @[Mux.scala:30:73] wire portsDIO_filtered_0_bits_sink = 1'h0; // @[Xbar.scala:352:24] wire portsDIO_filtered_0_bits_denied = 1'h0; // @[Xbar.scala:352:24] wire portsDIO_filtered_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire portsDIO_filtered_1_0_bits_sink = 1'h0; // @[Xbar.scala:352:24] wire portsDIO_filtered_1_0_bits_denied = 1'h0; // @[Xbar.scala:352:24] wire portsDIO_filtered_1_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire portsDIO_filtered_2_0_bits_sink = 1'h0; // @[Xbar.scala:352:24] wire portsDIO_filtered_2_0_bits_denied = 1'h0; // @[Xbar.scala:352:24] wire portsDIO_filtered_2_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire _portsEOI_WIRE_ready = 1'h0; // @[Bundles.scala:267:74] wire _portsEOI_WIRE_valid = 1'h0; // @[Bundles.scala:267:74] wire _portsEOI_WIRE_bits_sink = 1'h0; // @[Bundles.scala:267:74] wire _portsEOI_WIRE_1_ready = 1'h0; // @[Bundles.scala:267:61] wire _portsEOI_WIRE_1_valid = 1'h0; // @[Bundles.scala:267:61] wire _portsEOI_WIRE_1_bits_sink = 1'h0; // @[Bundles.scala:267:61] wire portsEOI_filtered_0_ready = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_0_valid = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_0_bits_sink = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_1_ready = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_1_valid = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_1_bits_sink = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_2_ready = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_2_valid = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_2_bits_sink = 1'h0; // @[Xbar.scala:352:24] wire _portsEOI_filtered_0_valid_T = 1'h0; // @[Xbar.scala:355:54] wire _portsEOI_filtered_0_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsEOI_filtered_1_valid_T = 1'h0; // @[Xbar.scala:355:54] wire _portsEOI_filtered_1_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsEOI_filtered_2_valid_T = 1'h0; // @[Xbar.scala:355:54] wire _portsEOI_filtered_2_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsEOI_T = 1'h0; // @[Mux.scala:30:73] wire _portsEOI_T_1 = 1'h0; // @[Mux.scala:30:73] wire _portsEOI_T_2 = 1'h0; // @[Mux.scala:30:73] wire _portsEOI_T_3 = 1'h0; // @[Mux.scala:30:73] wire _portsEOI_T_4 = 1'h0; // @[Mux.scala:30:73] wire _portsEOI_WIRE_2 = 1'h0; // @[Mux.scala:30:73] wire _state_WIRE_0 = 1'h0; // @[Arbiter.scala:88:34] wire _state_WIRE_1 = 1'h0; // @[Arbiter.scala:88:34] wire _state_WIRE_2 = 1'h0; // @[Arbiter.scala:88:34] wire _in_0_d_bits_WIRE_sink = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_WIRE_denied = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_WIRE_corrupt = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_T = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_T_1 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_T_2 = 1'h0; // 
@[Mux.scala:30:73] wire _in_0_d_bits_T_3 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_T_4 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_WIRE_1 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_T_10 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_T_11 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_T_12 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_T_13 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_T_14 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_WIRE_5 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_T_15 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_T_16 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_T_17 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_T_18 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_T_19 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_WIRE_6 = 1'h0; // @[Mux.scala:30:73] wire [1:0] auto_anon_in_d_bits_param = 2'h0; // @[Xbar.scala:74:9] wire [1:0] auto_anon_out_2_d_bits_param = 2'h0; // @[Xbar.scala:74:9] wire [1:0] auto_anon_out_1_d_bits_param = 2'h0; // @[Xbar.scala:74:9] wire [1:0] auto_anon_out_0_d_bits_param = 2'h0; // @[Xbar.scala:74:9] wire [1:0] anonIn_d_bits_param = 2'h0; // @[MixedNode.scala:551:17] wire [1:0] anonOut_d_bits_param = 2'h0; // @[MixedNode.scala:542:17] wire [1:0] x1_anonOut_d_bits_param = 2'h0; // @[MixedNode.scala:542:17] wire [1:0] x1_anonOut_1_d_bits_param = 2'h0; // @[MixedNode.scala:542:17] wire [1:0] in_0_d_bits_param = 2'h0; // @[Xbar.scala:159:18] wire [1:0] out_0_d_bits_param = 2'h0; // @[Xbar.scala:216:19] wire [1:0] out_1_d_bits_param = 2'h0; // @[Xbar.scala:216:19] wire [1:0] out_2_d_bits_param = 2'h0; // @[Xbar.scala:216:19] wire [1:0] _requestBOI_WIRE_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _requestBOI_WIRE_1_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _requestBOI_WIRE_2_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _requestBOI_WIRE_3_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _requestBOI_WIRE_4_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _requestBOI_WIRE_5_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _beatsBO_WIRE_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _beatsBO_WIRE_1_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _beatsBO_WIRE_2_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _beatsBO_WIRE_3_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _beatsBO_WIRE_4_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _beatsBO_WIRE_5_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _portsBIO_WIRE_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _portsBIO_WIRE_1_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] portsBIO_filtered_0_bits_param = 2'h0; // @[Xbar.scala:352:24] wire [1:0] _portsBIO_WIRE_2_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _portsBIO_WIRE_3_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] portsBIO_filtered_1_0_bits_param = 2'h0; // @[Xbar.scala:352:24] wire [1:0] _portsBIO_WIRE_4_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _portsBIO_WIRE_5_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] portsBIO_filtered_2_0_bits_param = 2'h0; // @[Xbar.scala:352:24] wire [1:0] portsDIO_filtered_0_bits_param = 2'h0; // @[Xbar.scala:352:24] wire [1:0] portsDIO_filtered_1_0_bits_param = 2'h0; // @[Xbar.scala:352:24] wire [1:0] portsDIO_filtered_2_0_bits_param = 2'h0; // @[Xbar.scala:352:24] wire [1:0] _in_0_d_bits_WIRE_param = 2'h0; // @[Mux.scala:30:73] wire [1:0] _in_0_d_bits_T_30 = 2'h0; // @[Mux.scala:30:73] wire [1:0] 
_in_0_d_bits_T_31 = 2'h0; // @[Mux.scala:30:73] wire [1:0] _in_0_d_bits_T_32 = 2'h0; // @[Mux.scala:30:73] wire [1:0] _in_0_d_bits_T_33 = 2'h0; // @[Mux.scala:30:73] wire [1:0] _in_0_d_bits_T_34 = 2'h0; // @[Mux.scala:30:73] wire [1:0] _in_0_d_bits_WIRE_9 = 2'h0; // @[Mux.scala:30:73] wire _requestCIO_T_4 = 1'h1; // @[Parameters.scala:137:59] wire requestCIO_0_0 = 1'h1; // @[Xbar.scala:308:107] wire _requestCIO_T_9 = 1'h1; // @[Parameters.scala:137:59] wire requestCIO_0_1 = 1'h1; // @[Xbar.scala:308:107] wire _requestCIO_T_14 = 1'h1; // @[Parameters.scala:137:59] wire requestCIO_0_2 = 1'h1; // @[Xbar.scala:308:107] wire _requestBOI_T_1 = 1'h1; // @[Parameters.scala:54:32] wire _requestBOI_T_2 = 1'h1; // @[Parameters.scala:56:32] wire _requestBOI_T_3 = 1'h1; // @[Parameters.scala:54:67] wire _requestBOI_T_4 = 1'h1; // @[Parameters.scala:57:20] wire requestBOI_0_0 = 1'h1; // @[Parameters.scala:56:48] wire _requestBOI_T_6 = 1'h1; // @[Parameters.scala:54:32] wire _requestBOI_T_7 = 1'h1; // @[Parameters.scala:56:32] wire _requestBOI_T_8 = 1'h1; // @[Parameters.scala:54:67] wire _requestBOI_T_9 = 1'h1; // @[Parameters.scala:57:20] wire requestBOI_1_0 = 1'h1; // @[Parameters.scala:56:48] wire _requestBOI_T_11 = 1'h1; // @[Parameters.scala:54:32] wire _requestBOI_T_12 = 1'h1; // @[Parameters.scala:56:32] wire _requestBOI_T_13 = 1'h1; // @[Parameters.scala:54:67] wire _requestBOI_T_14 = 1'h1; // @[Parameters.scala:57:20] wire requestBOI_2_0 = 1'h1; // @[Parameters.scala:56:48] wire _requestDOI_T_1 = 1'h1; // @[Parameters.scala:54:32] wire _requestDOI_T_2 = 1'h1; // @[Parameters.scala:56:32] wire _requestDOI_T_3 = 1'h1; // @[Parameters.scala:54:67] wire _requestDOI_T_4 = 1'h1; // @[Parameters.scala:57:20] wire requestDOI_0_0 = 1'h1; // @[Parameters.scala:56:48] wire _requestDOI_T_6 = 1'h1; // @[Parameters.scala:54:32] wire _requestDOI_T_7 = 1'h1; // @[Parameters.scala:56:32] wire _requestDOI_T_8 = 1'h1; // @[Parameters.scala:54:67] wire _requestDOI_T_9 = 1'h1; // @[Parameters.scala:57:20] wire requestDOI_1_0 = 1'h1; // @[Parameters.scala:56:48] wire _requestDOI_T_11 = 1'h1; // @[Parameters.scala:54:32] wire _requestDOI_T_12 = 1'h1; // @[Parameters.scala:56:32] wire _requestDOI_T_13 = 1'h1; // @[Parameters.scala:54:67] wire _requestDOI_T_14 = 1'h1; // @[Parameters.scala:57:20] wire requestDOI_2_0 = 1'h1; // @[Parameters.scala:56:48] wire beatsBO_opdata = 1'h1; // @[Edges.scala:97:28] wire beatsBO_opdata_1 = 1'h1; // @[Edges.scala:97:28] wire beatsBO_opdata_2 = 1'h1; // @[Edges.scala:97:28] wire _portsBIO_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54] wire _portsBIO_filtered_0_valid_T_2 = 1'h1; // @[Xbar.scala:355:54] wire _portsBIO_filtered_0_valid_T_4 = 1'h1; // @[Xbar.scala:355:54] wire _portsCOI_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54] wire _portsCOI_filtered_1_valid_T = 1'h1; // @[Xbar.scala:355:54] wire _portsCOI_filtered_2_valid_T = 1'h1; // @[Xbar.scala:355:54] wire _portsDIO_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54] wire _portsDIO_filtered_0_valid_T_2 = 1'h1; // @[Xbar.scala:355:54] wire _portsDIO_filtered_0_valid_T_4 = 1'h1; // @[Xbar.scala:355:54] wire [63:0] _addressC_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _addressC_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _requestBOI_WIRE_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _requestBOI_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] _requestBOI_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _requestBOI_WIRE_3_bits_data = 64'h0; 
// @[Bundles.scala:264:61] wire [63:0] _requestBOI_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _requestBOI_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] _beatsBO_WIRE_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _beatsBO_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] _beatsBO_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _beatsBO_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] _beatsBO_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _beatsBO_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] _beatsCI_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _beatsCI_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _portsBIO_WIRE_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _portsBIO_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] portsBIO_filtered_0_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [63:0] _portsBIO_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _portsBIO_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] portsBIO_filtered_1_0_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [63:0] _portsBIO_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _portsBIO_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] portsBIO_filtered_2_0_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [63:0] _portsCOI_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _portsCOI_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] portsCOI_filtered_0_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [63:0] portsCOI_filtered_1_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [63:0] portsCOI_filtered_2_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [28:0] _addressC_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _addressC_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _requestCIO_T = 29'h0; // @[Parameters.scala:137:31] wire [28:0] _requestCIO_T_5 = 29'h0; // @[Parameters.scala:137:31] wire [28:0] _requestCIO_T_10 = 29'h0; // @[Parameters.scala:137:31] wire [28:0] _requestBOI_WIRE_bits_address = 29'h0; // @[Bundles.scala:264:74] wire [28:0] _requestBOI_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:264:61] wire [28:0] _requestBOI_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:264:74] wire [28:0] _requestBOI_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:264:61] wire [28:0] _requestBOI_WIRE_4_bits_address = 29'h0; // @[Bundles.scala:264:74] wire [28:0] _requestBOI_WIRE_5_bits_address = 29'h0; // @[Bundles.scala:264:61] wire [28:0] _beatsBO_WIRE_bits_address = 29'h0; // @[Bundles.scala:264:74] wire [28:0] _beatsBO_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:264:61] wire [28:0] _beatsBO_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:264:74] wire [28:0] _beatsBO_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:264:61] wire [28:0] _beatsBO_WIRE_4_bits_address = 29'h0; // @[Bundles.scala:264:74] wire [28:0] _beatsBO_WIRE_5_bits_address = 29'h0; // @[Bundles.scala:264:61] wire [28:0] _beatsCI_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _beatsCI_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _portsBIO_WIRE_bits_address = 29'h0; // @[Bundles.scala:264:74] wire [28:0] _portsBIO_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:264:61] wire [28:0] portsBIO_filtered_0_bits_address = 29'h0; // @[Xbar.scala:352:24] wire [28:0] _portsBIO_WIRE_2_bits_address = 29'h0; // 
@[Bundles.scala:264:74] wire [28:0] _portsBIO_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:264:61] wire [28:0] portsBIO_filtered_1_0_bits_address = 29'h0; // @[Xbar.scala:352:24] wire [28:0] _portsBIO_WIRE_4_bits_address = 29'h0; // @[Bundles.scala:264:74] wire [28:0] _portsBIO_WIRE_5_bits_address = 29'h0; // @[Bundles.scala:264:61] wire [28:0] portsBIO_filtered_2_0_bits_address = 29'h0; // @[Xbar.scala:352:24] wire [28:0] _portsCOI_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _portsCOI_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] portsCOI_filtered_0_bits_address = 29'h0; // @[Xbar.scala:352:24] wire [28:0] portsCOI_filtered_1_bits_address = 29'h0; // @[Xbar.scala:352:24] wire [28:0] portsCOI_filtered_2_bits_address = 29'h0; // @[Xbar.scala:352:24] wire [7:0] _addressC_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _addressC_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _requestBOI_WIRE_bits_source = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _requestBOI_WIRE_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _requestBOI_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _requestBOI_WIRE_1_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _requestBOI_uncommonBits_T = 8'h0; // @[Parameters.scala:52:29] wire [7:0] requestBOI_uncommonBits = 8'h0; // @[Parameters.scala:52:56] wire [7:0] _requestBOI_WIRE_2_bits_source = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _requestBOI_WIRE_2_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _requestBOI_WIRE_3_bits_source = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _requestBOI_WIRE_3_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _requestBOI_uncommonBits_T_1 = 8'h0; // @[Parameters.scala:52:29] wire [7:0] requestBOI_uncommonBits_1 = 8'h0; // @[Parameters.scala:52:56] wire [7:0] _requestBOI_WIRE_4_bits_source = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _requestBOI_WIRE_4_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _requestBOI_WIRE_5_bits_source = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _requestBOI_WIRE_5_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _requestBOI_uncommonBits_T_2 = 8'h0; // @[Parameters.scala:52:29] wire [7:0] requestBOI_uncommonBits_2 = 8'h0; // @[Parameters.scala:52:56] wire [7:0] _beatsBO_WIRE_bits_source = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _beatsBO_WIRE_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _beatsBO_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _beatsBO_WIRE_1_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _beatsBO_WIRE_2_bits_source = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _beatsBO_WIRE_2_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _beatsBO_WIRE_3_bits_source = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _beatsBO_WIRE_3_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _beatsBO_WIRE_4_bits_source = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _beatsBO_WIRE_4_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _beatsBO_WIRE_5_bits_source = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _beatsBO_WIRE_5_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _beatsCI_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _beatsCI_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _portsBIO_WIRE_bits_source = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _portsBIO_WIRE_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _portsBIO_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:264:61] wire 
[7:0] _portsBIO_WIRE_1_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] portsBIO_filtered_0_bits_source = 8'h0; // @[Xbar.scala:352:24] wire [7:0] portsBIO_filtered_0_bits_mask = 8'h0; // @[Xbar.scala:352:24] wire [7:0] _portsBIO_WIRE_2_bits_source = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _portsBIO_WIRE_2_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _portsBIO_WIRE_3_bits_source = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _portsBIO_WIRE_3_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] portsBIO_filtered_1_0_bits_source = 8'h0; // @[Xbar.scala:352:24] wire [7:0] portsBIO_filtered_1_0_bits_mask = 8'h0; // @[Xbar.scala:352:24] wire [7:0] _portsBIO_WIRE_4_bits_source = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _portsBIO_WIRE_4_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _portsBIO_WIRE_5_bits_source = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _portsBIO_WIRE_5_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] portsBIO_filtered_2_0_bits_source = 8'h0; // @[Xbar.scala:352:24] wire [7:0] portsBIO_filtered_2_0_bits_mask = 8'h0; // @[Xbar.scala:352:24] wire [7:0] _portsCOI_WIRE_bits_source = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _portsCOI_WIRE_1_bits_source = 8'h0; // @[Bundles.scala:265:61] wire [7:0] portsCOI_filtered_0_bits_source = 8'h0; // @[Xbar.scala:352:24] wire [7:0] portsCOI_filtered_1_bits_source = 8'h0; // @[Xbar.scala:352:24] wire [7:0] portsCOI_filtered_2_bits_source = 8'h0; // @[Xbar.scala:352:24] wire [2:0] _addressC_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _addressC_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _addressC_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _addressC_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _addressC_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _addressC_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _requestBOI_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _requestBOI_WIRE_bits_size = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _requestBOI_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] _requestBOI_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:264:61] wire [2:0] _requestBOI_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _requestBOI_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _requestBOI_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] _requestBOI_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:264:61] wire [2:0] _requestBOI_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _requestBOI_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _requestBOI_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] _requestBOI_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:264:61] wire [2:0] _beatsBO_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _beatsBO_WIRE_bits_size = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _beatsBO_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] _beatsBO_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:264:61] wire [2:0] beatsBO_decode = 3'h0; // @[Edges.scala:220:59] wire [2:0] beatsBO_0 = 3'h0; // @[Edges.scala:221:14] wire [2:0] _beatsBO_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _beatsBO_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _beatsBO_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] _beatsBO_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:264:61] wire [2:0] beatsBO_decode_1 = 
3'h0; // @[Edges.scala:220:59] wire [2:0] beatsBO_1 = 3'h0; // @[Edges.scala:221:14] wire [2:0] _beatsBO_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _beatsBO_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _beatsBO_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] _beatsBO_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:264:61] wire [2:0] beatsBO_decode_2 = 3'h0; // @[Edges.scala:220:59] wire [2:0] beatsBO_2 = 3'h0; // @[Edges.scala:221:14] wire [2:0] _beatsCI_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _beatsCI_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _beatsCI_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _beatsCI_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _beatsCI_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _beatsCI_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] beatsCI_decode = 3'h0; // @[Edges.scala:220:59] wire [2:0] beatsCI_0 = 3'h0; // @[Edges.scala:221:14] wire [2:0] _portsBIO_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _portsBIO_WIRE_bits_size = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _portsBIO_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] _portsBIO_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:264:61] wire [2:0] portsBIO_filtered_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsBIO_filtered_0_bits_size = 3'h0; // @[Xbar.scala:352:24] wire [2:0] _portsBIO_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _portsBIO_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _portsBIO_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] _portsBIO_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:264:61] wire [2:0] portsBIO_filtered_1_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsBIO_filtered_1_0_bits_size = 3'h0; // @[Xbar.scala:352:24] wire [2:0] _portsBIO_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _portsBIO_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _portsBIO_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] _portsBIO_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:264:61] wire [2:0] portsBIO_filtered_2_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsBIO_filtered_2_0_bits_size = 3'h0; // @[Xbar.scala:352:24] wire [2:0] _portsCOI_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _portsCOI_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _portsCOI_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _portsCOI_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _portsCOI_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _portsCOI_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] portsCOI_filtered_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_0_bits_param = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_0_bits_size = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_1_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_1_bits_param = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_1_bits_size = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_2_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_2_bits_param = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_2_bits_size = 3'h0; // @[Xbar.scala:352:24] wire [5:0] _beatsBO_decode_T_2 = 6'h0; // @[package.scala:243:46] wire [5:0] 
_beatsBO_decode_T_5 = 6'h0; // @[package.scala:243:46] wire [5:0] _beatsBO_decode_T_8 = 6'h0; // @[package.scala:243:46] wire [5:0] _beatsCI_decode_T_2 = 6'h0; // @[package.scala:243:46] wire [5:0] _beatsBO_decode_T_1 = 6'h3F; // @[package.scala:243:76] wire [5:0] _beatsBO_decode_T_4 = 6'h3F; // @[package.scala:243:76] wire [5:0] _beatsBO_decode_T_7 = 6'h3F; // @[package.scala:243:76] wire [5:0] _beatsCI_decode_T_1 = 6'h3F; // @[package.scala:243:76] wire [12:0] _beatsBO_decode_T = 13'h3F; // @[package.scala:243:71] wire [12:0] _beatsBO_decode_T_3 = 13'h3F; // @[package.scala:243:71] wire [12:0] _beatsBO_decode_T_6 = 13'h3F; // @[package.scala:243:71] wire [12:0] _beatsCI_decode_T = 13'h3F; // @[package.scala:243:71] wire [29:0] _requestCIO_T_1 = 30'h0; // @[Parameters.scala:137:41] wire [29:0] _requestCIO_T_2 = 30'h0; // @[Parameters.scala:137:46] wire [29:0] _requestCIO_T_3 = 30'h0; // @[Parameters.scala:137:46] wire [29:0] _requestCIO_T_6 = 30'h0; // @[Parameters.scala:137:41] wire [29:0] _requestCIO_T_7 = 30'h0; // @[Parameters.scala:137:46] wire [29:0] _requestCIO_T_8 = 30'h0; // @[Parameters.scala:137:46] wire [29:0] _requestCIO_T_11 = 30'h0; // @[Parameters.scala:137:41] wire [29:0] _requestCIO_T_12 = 30'h0; // @[Parameters.scala:137:46] wire [29:0] _requestCIO_T_13 = 30'h0; // @[Parameters.scala:137:46] wire anonIn_a_ready; // @[MixedNode.scala:551:17] wire anonIn_a_valid = auto_anon_in_a_valid_0; // @[Xbar.scala:74:9] wire [2:0] anonIn_a_bits_opcode = auto_anon_in_a_bits_opcode_0; // @[Xbar.scala:74:9] wire [2:0] anonIn_a_bits_param = auto_anon_in_a_bits_param_0; // @[Xbar.scala:74:9] wire [2:0] anonIn_a_bits_size = auto_anon_in_a_bits_size_0; // @[Xbar.scala:74:9] wire [7:0] anonIn_a_bits_source = auto_anon_in_a_bits_source_0; // @[Xbar.scala:74:9] wire [28:0] anonIn_a_bits_address = auto_anon_in_a_bits_address_0; // @[Xbar.scala:74:9] wire [7:0] anonIn_a_bits_mask = auto_anon_in_a_bits_mask_0; // @[Xbar.scala:74:9] wire [63:0] anonIn_a_bits_data = auto_anon_in_a_bits_data_0; // @[Xbar.scala:74:9] wire anonIn_a_bits_corrupt = auto_anon_in_a_bits_corrupt_0; // @[Xbar.scala:74:9] wire anonIn_d_ready = auto_anon_in_d_ready_0; // @[Xbar.scala:74:9] wire anonIn_d_valid; // @[MixedNode.scala:551:17] wire [2:0] anonIn_d_bits_opcode; // @[MixedNode.scala:551:17] wire [2:0] anonIn_d_bits_size; // @[MixedNode.scala:551:17] wire [7:0] anonIn_d_bits_source; // @[MixedNode.scala:551:17] wire [63:0] anonIn_d_bits_data; // @[MixedNode.scala:551:17] wire x1_anonOut_1_a_ready = auto_anon_out_2_a_ready_0; // @[Xbar.scala:74:9] wire x1_anonOut_1_a_valid; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_1_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_1_a_bits_param; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_1_a_bits_size; // @[MixedNode.scala:542:17] wire [7:0] x1_anonOut_1_a_bits_source; // @[MixedNode.scala:542:17] wire [28:0] x1_anonOut_1_a_bits_address; // @[MixedNode.scala:542:17] wire [7:0] x1_anonOut_1_a_bits_mask; // @[MixedNode.scala:542:17] wire [63:0] x1_anonOut_1_a_bits_data; // @[MixedNode.scala:542:17] wire x1_anonOut_1_a_bits_corrupt; // @[MixedNode.scala:542:17] wire x1_anonOut_1_d_ready; // @[MixedNode.scala:542:17] wire x1_anonOut_1_d_valid = auto_anon_out_2_d_valid_0; // @[Xbar.scala:74:9] wire [2:0] x1_anonOut_1_d_bits_opcode = auto_anon_out_2_d_bits_opcode_0; // @[Xbar.scala:74:9] wire [2:0] x1_anonOut_1_d_bits_size = auto_anon_out_2_d_bits_size_0; // @[Xbar.scala:74:9] wire [7:0] x1_anonOut_1_d_bits_source = auto_anon_out_2_d_bits_source_0; // 
@[Xbar.scala:74:9] wire [63:0] x1_anonOut_1_d_bits_data = auto_anon_out_2_d_bits_data_0; // @[Xbar.scala:74:9] wire x1_anonOut_a_ready = auto_anon_out_1_a_ready_0; // @[Xbar.scala:74:9] wire x1_anonOut_a_valid; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_a_bits_param; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_a_bits_size; // @[MixedNode.scala:542:17] wire [7:0] x1_anonOut_a_bits_source; // @[MixedNode.scala:542:17] wire [28:0] x1_anonOut_a_bits_address; // @[MixedNode.scala:542:17] wire [7:0] x1_anonOut_a_bits_mask; // @[MixedNode.scala:542:17] wire [63:0] x1_anonOut_a_bits_data; // @[MixedNode.scala:542:17] wire x1_anonOut_a_bits_corrupt; // @[MixedNode.scala:542:17] wire x1_anonOut_d_ready; // @[MixedNode.scala:542:17] wire x1_anonOut_d_valid = auto_anon_out_1_d_valid_0; // @[Xbar.scala:74:9] wire [2:0] x1_anonOut_d_bits_opcode = auto_anon_out_1_d_bits_opcode_0; // @[Xbar.scala:74:9] wire [2:0] x1_anonOut_d_bits_size = auto_anon_out_1_d_bits_size_0; // @[Xbar.scala:74:9] wire [7:0] x1_anonOut_d_bits_source = auto_anon_out_1_d_bits_source_0; // @[Xbar.scala:74:9] wire [63:0] x1_anonOut_d_bits_data = auto_anon_out_1_d_bits_data_0; // @[Xbar.scala:74:9] wire anonOut_a_ready = auto_anon_out_0_a_ready_0; // @[Xbar.scala:74:9] wire anonOut_a_valid; // @[MixedNode.scala:542:17] wire [2:0] anonOut_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] anonOut_a_bits_param; // @[MixedNode.scala:542:17] wire [2:0] anonOut_a_bits_size; // @[MixedNode.scala:542:17] wire [7:0] anonOut_a_bits_source; // @[MixedNode.scala:542:17] wire [12:0] anonOut_a_bits_address; // @[MixedNode.scala:542:17] wire [7:0] anonOut_a_bits_mask; // @[MixedNode.scala:542:17] wire [63:0] anonOut_a_bits_data; // @[MixedNode.scala:542:17] wire anonOut_a_bits_corrupt; // @[MixedNode.scala:542:17] wire anonOut_d_ready; // @[MixedNode.scala:542:17] wire anonOut_d_valid = auto_anon_out_0_d_valid_0; // @[Xbar.scala:74:9] wire [2:0] anonOut_d_bits_opcode = auto_anon_out_0_d_bits_opcode_0; // @[Xbar.scala:74:9] wire [2:0] anonOut_d_bits_size = auto_anon_out_0_d_bits_size_0; // @[Xbar.scala:74:9] wire [7:0] anonOut_d_bits_source = auto_anon_out_0_d_bits_source_0; // @[Xbar.scala:74:9] wire [63:0] anonOut_d_bits_data = auto_anon_out_0_d_bits_data_0; // @[Xbar.scala:74:9] wire auto_anon_in_a_ready_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_in_d_bits_opcode_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_in_d_bits_size_0; // @[Xbar.scala:74:9] wire [7:0] auto_anon_in_d_bits_source_0; // @[Xbar.scala:74:9] wire [63:0] auto_anon_in_d_bits_data_0; // @[Xbar.scala:74:9] wire auto_anon_in_d_valid_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_2_a_bits_opcode_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_2_a_bits_param_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_2_a_bits_size_0; // @[Xbar.scala:74:9] wire [7:0] auto_anon_out_2_a_bits_source_0; // @[Xbar.scala:74:9] wire [28:0] auto_anon_out_2_a_bits_address_0; // @[Xbar.scala:74:9] wire [7:0] auto_anon_out_2_a_bits_mask_0; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_2_a_bits_data_0; // @[Xbar.scala:74:9] wire auto_anon_out_2_a_bits_corrupt_0; // @[Xbar.scala:74:9] wire auto_anon_out_2_a_valid_0; // @[Xbar.scala:74:9] wire auto_anon_out_2_d_ready_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_1_a_bits_opcode_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_1_a_bits_param_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_1_a_bits_size_0; // @[Xbar.scala:74:9] wire [7:0] 
auto_anon_out_1_a_bits_source_0; // @[Xbar.scala:74:9] wire [28:0] auto_anon_out_1_a_bits_address_0; // @[Xbar.scala:74:9] wire [7:0] auto_anon_out_1_a_bits_mask_0; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_1_a_bits_data_0; // @[Xbar.scala:74:9] wire auto_anon_out_1_a_bits_corrupt_0; // @[Xbar.scala:74:9] wire auto_anon_out_1_a_valid_0; // @[Xbar.scala:74:9] wire auto_anon_out_1_d_ready_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_0_a_bits_opcode_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_0_a_bits_param_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_0_a_bits_size_0; // @[Xbar.scala:74:9] wire [7:0] auto_anon_out_0_a_bits_source_0; // @[Xbar.scala:74:9] wire [12:0] auto_anon_out_0_a_bits_address_0; // @[Xbar.scala:74:9] wire [7:0] auto_anon_out_0_a_bits_mask_0; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_0_a_bits_data_0; // @[Xbar.scala:74:9] wire auto_anon_out_0_a_bits_corrupt_0; // @[Xbar.scala:74:9] wire auto_anon_out_0_a_valid_0; // @[Xbar.scala:74:9] wire auto_anon_out_0_d_ready_0; // @[Xbar.scala:74:9] wire in_0_a_ready; // @[Xbar.scala:159:18] assign auto_anon_in_a_ready_0 = anonIn_a_ready; // @[Xbar.scala:74:9] wire in_0_a_valid = anonIn_a_valid; // @[Xbar.scala:159:18] wire [2:0] in_0_a_bits_opcode = anonIn_a_bits_opcode; // @[Xbar.scala:159:18] wire [2:0] in_0_a_bits_param = anonIn_a_bits_param; // @[Xbar.scala:159:18] wire [2:0] in_0_a_bits_size = anonIn_a_bits_size; // @[Xbar.scala:159:18] wire [7:0] _in_0_a_bits_source_T = anonIn_a_bits_source; // @[Xbar.scala:166:55] wire [28:0] in_0_a_bits_address = anonIn_a_bits_address; // @[Xbar.scala:159:18] wire [7:0] in_0_a_bits_mask = anonIn_a_bits_mask; // @[Xbar.scala:159:18] wire [63:0] in_0_a_bits_data = anonIn_a_bits_data; // @[Xbar.scala:159:18] wire in_0_a_bits_corrupt = anonIn_a_bits_corrupt; // @[Xbar.scala:159:18] wire in_0_d_ready = anonIn_d_ready; // @[Xbar.scala:159:18] wire in_0_d_valid; // @[Xbar.scala:159:18] assign auto_anon_in_d_valid_0 = anonIn_d_valid; // @[Xbar.scala:74:9] wire [2:0] in_0_d_bits_opcode; // @[Xbar.scala:159:18] assign auto_anon_in_d_bits_opcode_0 = anonIn_d_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] in_0_d_bits_size; // @[Xbar.scala:159:18] assign auto_anon_in_d_bits_size_0 = anonIn_d_bits_size; // @[Xbar.scala:74:9] wire [7:0] _anonIn_d_bits_source_T; // @[Xbar.scala:156:69] assign auto_anon_in_d_bits_source_0 = anonIn_d_bits_source; // @[Xbar.scala:74:9] wire [63:0] in_0_d_bits_data; // @[Xbar.scala:159:18] assign auto_anon_in_d_bits_data_0 = anonIn_d_bits_data; // @[Xbar.scala:74:9] wire out_0_a_ready = anonOut_a_ready; // @[Xbar.scala:216:19] wire out_0_a_valid; // @[Xbar.scala:216:19] assign auto_anon_out_0_a_valid_0 = anonOut_a_valid; // @[Xbar.scala:74:9] wire [2:0] out_0_a_bits_opcode; // @[Xbar.scala:216:19] assign auto_anon_out_0_a_bits_opcode_0 = anonOut_a_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] out_0_a_bits_param; // @[Xbar.scala:216:19] assign auto_anon_out_0_a_bits_param_0 = anonOut_a_bits_param; // @[Xbar.scala:74:9] wire [2:0] out_0_a_bits_size; // @[Xbar.scala:216:19] assign auto_anon_out_0_a_bits_size_0 = anonOut_a_bits_size; // @[Xbar.scala:74:9] wire [7:0] out_0_a_bits_source; // @[Xbar.scala:216:19] assign auto_anon_out_0_a_bits_source_0 = anonOut_a_bits_source; // @[Xbar.scala:74:9] assign auto_anon_out_0_a_bits_address_0 = anonOut_a_bits_address; // @[Xbar.scala:74:9] wire [7:0] out_0_a_bits_mask; // @[Xbar.scala:216:19] assign auto_anon_out_0_a_bits_mask_0 = anonOut_a_bits_mask; // @[Xbar.scala:74:9] wire [63:0] out_0_a_bits_data; // 
@[Xbar.scala:216:19] assign auto_anon_out_0_a_bits_data_0 = anonOut_a_bits_data; // @[Xbar.scala:74:9] wire out_0_a_bits_corrupt; // @[Xbar.scala:216:19] assign auto_anon_out_0_a_bits_corrupt_0 = anonOut_a_bits_corrupt; // @[Xbar.scala:74:9] wire out_0_d_ready; // @[Xbar.scala:216:19] assign auto_anon_out_0_d_ready_0 = anonOut_d_ready; // @[Xbar.scala:74:9] wire out_0_d_valid = anonOut_d_valid; // @[Xbar.scala:216:19] wire [2:0] out_0_d_bits_opcode = anonOut_d_bits_opcode; // @[Xbar.scala:216:19] wire [2:0] out_0_d_bits_size = anonOut_d_bits_size; // @[Xbar.scala:216:19] wire [7:0] out_0_d_bits_source = anonOut_d_bits_source; // @[Xbar.scala:216:19] wire [63:0] out_0_d_bits_data = anonOut_d_bits_data; // @[Xbar.scala:216:19] wire out_1_a_ready = x1_anonOut_a_ready; // @[Xbar.scala:216:19] wire out_1_a_valid; // @[Xbar.scala:216:19] assign auto_anon_out_1_a_valid_0 = x1_anonOut_a_valid; // @[Xbar.scala:74:9] wire [2:0] out_1_a_bits_opcode; // @[Xbar.scala:216:19] assign auto_anon_out_1_a_bits_opcode_0 = x1_anonOut_a_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] out_1_a_bits_param; // @[Xbar.scala:216:19] assign auto_anon_out_1_a_bits_param_0 = x1_anonOut_a_bits_param; // @[Xbar.scala:74:9] wire [2:0] out_1_a_bits_size; // @[Xbar.scala:216:19] assign auto_anon_out_1_a_bits_size_0 = x1_anonOut_a_bits_size; // @[Xbar.scala:74:9] wire [7:0] out_1_a_bits_source; // @[Xbar.scala:216:19] assign auto_anon_out_1_a_bits_source_0 = x1_anonOut_a_bits_source; // @[Xbar.scala:74:9] wire [28:0] out_1_a_bits_address; // @[Xbar.scala:216:19] assign auto_anon_out_1_a_bits_address_0 = x1_anonOut_a_bits_address; // @[Xbar.scala:74:9] wire [7:0] out_1_a_bits_mask; // @[Xbar.scala:216:19] assign auto_anon_out_1_a_bits_mask_0 = x1_anonOut_a_bits_mask; // @[Xbar.scala:74:9] wire [63:0] out_1_a_bits_data; // @[Xbar.scala:216:19] assign auto_anon_out_1_a_bits_data_0 = x1_anonOut_a_bits_data; // @[Xbar.scala:74:9] wire out_1_a_bits_corrupt; // @[Xbar.scala:216:19] assign auto_anon_out_1_a_bits_corrupt_0 = x1_anonOut_a_bits_corrupt; // @[Xbar.scala:74:9] wire out_1_d_ready; // @[Xbar.scala:216:19] assign auto_anon_out_1_d_ready_0 = x1_anonOut_d_ready; // @[Xbar.scala:74:9] wire out_1_d_valid = x1_anonOut_d_valid; // @[Xbar.scala:216:19] wire [2:0] out_1_d_bits_opcode = x1_anonOut_d_bits_opcode; // @[Xbar.scala:216:19] wire [2:0] out_1_d_bits_size = x1_anonOut_d_bits_size; // @[Xbar.scala:216:19] wire [7:0] out_1_d_bits_source = x1_anonOut_d_bits_source; // @[Xbar.scala:216:19] wire [63:0] out_1_d_bits_data = x1_anonOut_d_bits_data; // @[Xbar.scala:216:19] wire out_2_a_ready = x1_anonOut_1_a_ready; // @[Xbar.scala:216:19] wire out_2_a_valid; // @[Xbar.scala:216:19] assign auto_anon_out_2_a_valid_0 = x1_anonOut_1_a_valid; // @[Xbar.scala:74:9] wire [2:0] out_2_a_bits_opcode; // @[Xbar.scala:216:19] assign auto_anon_out_2_a_bits_opcode_0 = x1_anonOut_1_a_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] out_2_a_bits_param; // @[Xbar.scala:216:19] assign auto_anon_out_2_a_bits_param_0 = x1_anonOut_1_a_bits_param; // @[Xbar.scala:74:9] wire [2:0] out_2_a_bits_size; // @[Xbar.scala:216:19] assign auto_anon_out_2_a_bits_size_0 = x1_anonOut_1_a_bits_size; // @[Xbar.scala:74:9] wire [7:0] out_2_a_bits_source; // @[Xbar.scala:216:19] assign auto_anon_out_2_a_bits_source_0 = x1_anonOut_1_a_bits_source; // @[Xbar.scala:74:9] wire [28:0] out_2_a_bits_address; // @[Xbar.scala:216:19] assign auto_anon_out_2_a_bits_address_0 = x1_anonOut_1_a_bits_address; // @[Xbar.scala:74:9] wire [7:0] out_2_a_bits_mask; // @[Xbar.scala:216:19] assign 
auto_anon_out_2_a_bits_mask_0 = x1_anonOut_1_a_bits_mask; // @[Xbar.scala:74:9] wire [63:0] out_2_a_bits_data; // @[Xbar.scala:216:19] assign auto_anon_out_2_a_bits_data_0 = x1_anonOut_1_a_bits_data; // @[Xbar.scala:74:9] wire out_2_a_bits_corrupt; // @[Xbar.scala:216:19] assign auto_anon_out_2_a_bits_corrupt_0 = x1_anonOut_1_a_bits_corrupt; // @[Xbar.scala:74:9] wire out_2_d_ready; // @[Xbar.scala:216:19] assign auto_anon_out_2_d_ready_0 = x1_anonOut_1_d_ready; // @[Xbar.scala:74:9] wire out_2_d_valid = x1_anonOut_1_d_valid; // @[Xbar.scala:216:19] wire [2:0] out_2_d_bits_opcode = x1_anonOut_1_d_bits_opcode; // @[Xbar.scala:216:19] wire [2:0] out_2_d_bits_size = x1_anonOut_1_d_bits_size; // @[Xbar.scala:216:19] wire [7:0] out_2_d_bits_source = x1_anonOut_1_d_bits_source; // @[Xbar.scala:216:19] wire [63:0] out_2_d_bits_data = x1_anonOut_1_d_bits_data; // @[Xbar.scala:216:19] wire _portsAOI_in_0_a_ready_WIRE; // @[Mux.scala:30:73] assign anonIn_a_ready = in_0_a_ready; // @[Xbar.scala:159:18] wire [2:0] portsAOI_filtered_0_bits_opcode = in_0_a_bits_opcode; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_1_bits_opcode = in_0_a_bits_opcode; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_2_bits_opcode = in_0_a_bits_opcode; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_0_bits_param = in_0_a_bits_param; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_1_bits_param = in_0_a_bits_param; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_2_bits_param = in_0_a_bits_param; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_0_bits_size = in_0_a_bits_size; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_1_bits_size = in_0_a_bits_size; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_2_bits_size = in_0_a_bits_size; // @[Xbar.scala:159:18, :352:24] wire [7:0] portsAOI_filtered_0_bits_source = in_0_a_bits_source; // @[Xbar.scala:159:18, :352:24] wire [7:0] portsAOI_filtered_1_bits_source = in_0_a_bits_source; // @[Xbar.scala:159:18, :352:24] wire [7:0] portsAOI_filtered_2_bits_source = in_0_a_bits_source; // @[Xbar.scala:159:18, :352:24] wire [28:0] _requestAIO_T = in_0_a_bits_address; // @[Xbar.scala:159:18] wire [28:0] portsAOI_filtered_0_bits_address = in_0_a_bits_address; // @[Xbar.scala:159:18, :352:24] wire [28:0] portsAOI_filtered_1_bits_address = in_0_a_bits_address; // @[Xbar.scala:159:18, :352:24] wire [28:0] portsAOI_filtered_2_bits_address = in_0_a_bits_address; // @[Xbar.scala:159:18, :352:24] wire [7:0] portsAOI_filtered_0_bits_mask = in_0_a_bits_mask; // @[Xbar.scala:159:18, :352:24] wire [7:0] portsAOI_filtered_1_bits_mask = in_0_a_bits_mask; // @[Xbar.scala:159:18, :352:24] wire [7:0] portsAOI_filtered_2_bits_mask = in_0_a_bits_mask; // @[Xbar.scala:159:18, :352:24] wire [63:0] portsAOI_filtered_0_bits_data = in_0_a_bits_data; // @[Xbar.scala:159:18, :352:24] wire [63:0] portsAOI_filtered_1_bits_data = in_0_a_bits_data; // @[Xbar.scala:159:18, :352:24] wire [63:0] portsAOI_filtered_2_bits_data = in_0_a_bits_data; // @[Xbar.scala:159:18, :352:24] wire portsAOI_filtered_0_bits_corrupt = in_0_a_bits_corrupt; // @[Xbar.scala:159:18, :352:24] wire portsAOI_filtered_1_bits_corrupt = in_0_a_bits_corrupt; // @[Xbar.scala:159:18, :352:24] wire portsAOI_filtered_2_bits_corrupt = in_0_a_bits_corrupt; // @[Xbar.scala:159:18, :352:24] wire _in_0_d_valid_T_7; // @[Arbiter.scala:96:24] assign anonIn_d_valid = in_0_d_valid; // @[Xbar.scala:159:18] wire [2:0] _in_0_d_bits_WIRE_opcode; // 
@[Mux.scala:30:73] assign anonIn_d_bits_opcode = in_0_d_bits_opcode; // @[Xbar.scala:159:18] wire [2:0] _in_0_d_bits_WIRE_size; // @[Mux.scala:30:73] assign anonIn_d_bits_size = in_0_d_bits_size; // @[Xbar.scala:159:18] wire [7:0] _in_0_d_bits_WIRE_source; // @[Mux.scala:30:73] assign _anonIn_d_bits_source_T = in_0_d_bits_source; // @[Xbar.scala:156:69, :159:18] wire [63:0] _in_0_d_bits_WIRE_data; // @[Mux.scala:30:73] assign anonIn_d_bits_data = in_0_d_bits_data; // @[Xbar.scala:159:18] assign in_0_a_bits_source = _in_0_a_bits_source_T; // @[Xbar.scala:159:18, :166:55] assign anonIn_d_bits_source = _anonIn_d_bits_source_T; // @[Xbar.scala:156:69] wire portsAOI_filtered_0_ready = out_0_a_ready; // @[Xbar.scala:216:19, :352:24] wire portsAOI_filtered_0_valid; // @[Xbar.scala:352:24] assign anonOut_a_valid = out_0_a_valid; // @[Xbar.scala:216:19] assign anonOut_a_bits_opcode = out_0_a_bits_opcode; // @[Xbar.scala:216:19] assign anonOut_a_bits_param = out_0_a_bits_param; // @[Xbar.scala:216:19] assign anonOut_a_bits_size = out_0_a_bits_size; // @[Xbar.scala:216:19] assign anonOut_a_bits_source = out_0_a_bits_source; // @[Xbar.scala:216:19] assign anonOut_a_bits_mask = out_0_a_bits_mask; // @[Xbar.scala:216:19] assign anonOut_a_bits_data = out_0_a_bits_data; // @[Xbar.scala:216:19] assign anonOut_a_bits_corrupt = out_0_a_bits_corrupt; // @[Xbar.scala:216:19] wire portsDIO_filtered_0_ready; // @[Xbar.scala:352:24] assign anonOut_d_ready = out_0_d_ready; // @[Xbar.scala:216:19] wire _portsDIO_filtered_0_valid_T_1 = out_0_d_valid; // @[Xbar.scala:216:19, :355:40] wire [2:0] portsDIO_filtered_0_bits_opcode = out_0_d_bits_opcode; // @[Xbar.scala:216:19, :352:24] wire [2:0] portsDIO_filtered_0_bits_size = out_0_d_bits_size; // @[Xbar.scala:216:19, :352:24] wire [7:0] _requestDOI_uncommonBits_T = out_0_d_bits_source; // @[Xbar.scala:216:19] wire [7:0] portsDIO_filtered_0_bits_source = out_0_d_bits_source; // @[Xbar.scala:216:19, :352:24] wire [63:0] portsDIO_filtered_0_bits_data = out_0_d_bits_data; // @[Xbar.scala:216:19, :352:24] wire portsAOI_filtered_1_ready = out_1_a_ready; // @[Xbar.scala:216:19, :352:24] wire portsAOI_filtered_1_valid; // @[Xbar.scala:352:24] assign x1_anonOut_a_valid = out_1_a_valid; // @[Xbar.scala:216:19] assign x1_anonOut_a_bits_opcode = out_1_a_bits_opcode; // @[Xbar.scala:216:19] assign x1_anonOut_a_bits_param = out_1_a_bits_param; // @[Xbar.scala:216:19] assign x1_anonOut_a_bits_size = out_1_a_bits_size; // @[Xbar.scala:216:19] assign x1_anonOut_a_bits_source = out_1_a_bits_source; // @[Xbar.scala:216:19] assign x1_anonOut_a_bits_address = out_1_a_bits_address; // @[Xbar.scala:216:19] assign x1_anonOut_a_bits_mask = out_1_a_bits_mask; // @[Xbar.scala:216:19] assign x1_anonOut_a_bits_data = out_1_a_bits_data; // @[Xbar.scala:216:19] assign x1_anonOut_a_bits_corrupt = out_1_a_bits_corrupt; // @[Xbar.scala:216:19] wire portsDIO_filtered_1_0_ready; // @[Xbar.scala:352:24] assign x1_anonOut_d_ready = out_1_d_ready; // @[Xbar.scala:216:19] wire _portsDIO_filtered_0_valid_T_3 = out_1_d_valid; // @[Xbar.scala:216:19, :355:40] wire [2:0] portsDIO_filtered_1_0_bits_opcode = out_1_d_bits_opcode; // @[Xbar.scala:216:19, :352:24] wire [2:0] portsDIO_filtered_1_0_bits_size = out_1_d_bits_size; // @[Xbar.scala:216:19, :352:24] wire [7:0] _requestDOI_uncommonBits_T_1 = out_1_d_bits_source; // @[Xbar.scala:216:19] wire [7:0] portsDIO_filtered_1_0_bits_source = out_1_d_bits_source; // @[Xbar.scala:216:19, :352:24] wire [63:0] portsDIO_filtered_1_0_bits_data = out_1_d_bits_data; // 
@[Xbar.scala:216:19, :352:24] wire portsAOI_filtered_2_ready = out_2_a_ready; // @[Xbar.scala:216:19, :352:24] wire portsAOI_filtered_2_valid; // @[Xbar.scala:352:24] assign x1_anonOut_1_a_valid = out_2_a_valid; // @[Xbar.scala:216:19] assign x1_anonOut_1_a_bits_opcode = out_2_a_bits_opcode; // @[Xbar.scala:216:19] assign x1_anonOut_1_a_bits_param = out_2_a_bits_param; // @[Xbar.scala:216:19] assign x1_anonOut_1_a_bits_size = out_2_a_bits_size; // @[Xbar.scala:216:19] assign x1_anonOut_1_a_bits_source = out_2_a_bits_source; // @[Xbar.scala:216:19] assign x1_anonOut_1_a_bits_address = out_2_a_bits_address; // @[Xbar.scala:216:19] assign x1_anonOut_1_a_bits_mask = out_2_a_bits_mask; // @[Xbar.scala:216:19] assign x1_anonOut_1_a_bits_data = out_2_a_bits_data; // @[Xbar.scala:216:19] assign x1_anonOut_1_a_bits_corrupt = out_2_a_bits_corrupt; // @[Xbar.scala:216:19] wire portsDIO_filtered_2_0_ready; // @[Xbar.scala:352:24] assign x1_anonOut_1_d_ready = out_2_d_ready; // @[Xbar.scala:216:19] wire _portsDIO_filtered_0_valid_T_5 = out_2_d_valid; // @[Xbar.scala:216:19, :355:40] wire [2:0] portsDIO_filtered_2_0_bits_opcode = out_2_d_bits_opcode; // @[Xbar.scala:216:19, :352:24] wire [2:0] portsDIO_filtered_2_0_bits_size = out_2_d_bits_size; // @[Xbar.scala:216:19, :352:24] wire [7:0] _requestDOI_uncommonBits_T_2 = out_2_d_bits_source; // @[Xbar.scala:216:19] wire [7:0] portsDIO_filtered_2_0_bits_source = out_2_d_bits_source; // @[Xbar.scala:216:19, :352:24] wire [63:0] portsDIO_filtered_2_0_bits_data = out_2_d_bits_data; // @[Xbar.scala:216:19, :352:24] wire [28:0] out_0_a_bits_address; // @[Xbar.scala:216:19] assign anonOut_a_bits_address = out_0_a_bits_address[12:0]; // @[Xbar.scala:216:19, :222:41] wire [29:0] _requestAIO_T_1 = {1'h0, _requestAIO_T}; // @[Parameters.scala:137:{31,41}] wire [29:0] _requestAIO_T_2 = _requestAIO_T_1 & 30'h10020000; // @[Parameters.scala:137:{41,46}] wire [29:0] _requestAIO_T_3 = _requestAIO_T_2; // @[Parameters.scala:137:46] wire _requestAIO_T_4 = _requestAIO_T_3 == 30'h0; // @[Parameters.scala:137:{46,59}] wire requestAIO_0_0 = _requestAIO_T_4; // @[Xbar.scala:307:107] wire _portsAOI_filtered_0_valid_T = requestAIO_0_0; // @[Xbar.scala:307:107, :355:54] wire [28:0] _requestAIO_T_5 = in_0_a_bits_address ^ 29'h10020000; // @[Xbar.scala:159:18] wire [29:0] _requestAIO_T_6 = {1'h0, _requestAIO_T_5}; // @[Parameters.scala:137:{31,41}] wire [29:0] _requestAIO_T_7 = _requestAIO_T_6 & 30'h10020000; // @[Parameters.scala:137:{41,46}] wire [29:0] _requestAIO_T_8 = _requestAIO_T_7; // @[Parameters.scala:137:46] wire _requestAIO_T_9 = _requestAIO_T_8 == 30'h0; // @[Parameters.scala:137:{46,59}] wire requestAIO_0_1 = _requestAIO_T_9; // @[Xbar.scala:307:107] wire _portsAOI_filtered_1_valid_T = requestAIO_0_1; // @[Xbar.scala:307:107, :355:54] wire [28:0] _requestAIO_T_10 = in_0_a_bits_address ^ 29'h10000000; // @[Xbar.scala:159:18] wire [29:0] _requestAIO_T_11 = {1'h0, _requestAIO_T_10}; // @[Parameters.scala:137:{31,41}] wire [29:0] _requestAIO_T_12 = _requestAIO_T_11 & 30'h10020000; // @[Parameters.scala:137:{41,46}] wire [29:0] _requestAIO_T_13 = _requestAIO_T_12; // @[Parameters.scala:137:46] wire _requestAIO_T_14 = _requestAIO_T_13 == 30'h0; // @[Parameters.scala:137:{46,59}] wire requestAIO_0_2 = _requestAIO_T_14; // @[Xbar.scala:307:107] wire _portsAOI_filtered_2_valid_T = requestAIO_0_2; // @[Xbar.scala:307:107, :355:54] wire [7:0] requestDOI_uncommonBits = _requestDOI_uncommonBits_T; // @[Parameters.scala:52:{29,56}] wire [7:0] requestDOI_uncommonBits_1 = 
_requestDOI_uncommonBits_T_1; // @[Parameters.scala:52:{29,56}] wire [7:0] requestDOI_uncommonBits_2 = _requestDOI_uncommonBits_T_2; // @[Parameters.scala:52:{29,56}] wire [12:0] _beatsAI_decode_T = 13'h3F << in_0_a_bits_size; // @[package.scala:243:71] wire [5:0] _beatsAI_decode_T_1 = _beatsAI_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _beatsAI_decode_T_2 = ~_beatsAI_decode_T_1; // @[package.scala:243:{46,76}] wire [2:0] beatsAI_decode = _beatsAI_decode_T_2[5:3]; // @[package.scala:243:46] wire _beatsAI_opdata_T = in_0_a_bits_opcode[2]; // @[Xbar.scala:159:18] wire beatsAI_opdata = ~_beatsAI_opdata_T; // @[Edges.scala:92:{28,37}] wire [2:0] beatsAI_0 = beatsAI_opdata ? beatsAI_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14] wire [12:0] _beatsDO_decode_T = 13'h3F << out_0_d_bits_size; // @[package.scala:243:71] wire [5:0] _beatsDO_decode_T_1 = _beatsDO_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _beatsDO_decode_T_2 = ~_beatsDO_decode_T_1; // @[package.scala:243:{46,76}] wire [2:0] beatsDO_decode = _beatsDO_decode_T_2[5:3]; // @[package.scala:243:46] wire beatsDO_opdata = out_0_d_bits_opcode[0]; // @[Xbar.scala:216:19] wire [2:0] beatsDO_0 = beatsDO_opdata ? beatsDO_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] wire [12:0] _beatsDO_decode_T_3 = 13'h3F << out_1_d_bits_size; // @[package.scala:243:71] wire [5:0] _beatsDO_decode_T_4 = _beatsDO_decode_T_3[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _beatsDO_decode_T_5 = ~_beatsDO_decode_T_4; // @[package.scala:243:{46,76}] wire [2:0] beatsDO_decode_1 = _beatsDO_decode_T_5[5:3]; // @[package.scala:243:46] wire beatsDO_opdata_1 = out_1_d_bits_opcode[0]; // @[Xbar.scala:216:19] wire [2:0] beatsDO_1 = beatsDO_opdata_1 ? beatsDO_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] wire [12:0] _beatsDO_decode_T_6 = 13'h3F << out_2_d_bits_size; // @[package.scala:243:71] wire [5:0] _beatsDO_decode_T_7 = _beatsDO_decode_T_6[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _beatsDO_decode_T_8 = ~_beatsDO_decode_T_7; // @[package.scala:243:{46,76}] wire [2:0] beatsDO_decode_2 = _beatsDO_decode_T_8[5:3]; // @[package.scala:243:46] wire beatsDO_opdata_2 = out_2_d_bits_opcode[0]; // @[Xbar.scala:216:19] wire [2:0] beatsDO_2 = beatsDO_opdata_2 ? 
beatsDO_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] wire _portsAOI_filtered_0_valid_T_1; // @[Xbar.scala:355:40] assign out_0_a_valid = portsAOI_filtered_0_valid; // @[Xbar.scala:216:19, :352:24] assign out_0_a_bits_opcode = portsAOI_filtered_0_bits_opcode; // @[Xbar.scala:216:19, :352:24] assign out_0_a_bits_param = portsAOI_filtered_0_bits_param; // @[Xbar.scala:216:19, :352:24] assign out_0_a_bits_size = portsAOI_filtered_0_bits_size; // @[Xbar.scala:216:19, :352:24] assign out_0_a_bits_source = portsAOI_filtered_0_bits_source; // @[Xbar.scala:216:19, :352:24] assign out_0_a_bits_address = portsAOI_filtered_0_bits_address; // @[Xbar.scala:216:19, :352:24] assign out_0_a_bits_mask = portsAOI_filtered_0_bits_mask; // @[Xbar.scala:216:19, :352:24] assign out_0_a_bits_data = portsAOI_filtered_0_bits_data; // @[Xbar.scala:216:19, :352:24] assign out_0_a_bits_corrupt = portsAOI_filtered_0_bits_corrupt; // @[Xbar.scala:216:19, :352:24] wire _portsAOI_filtered_1_valid_T_1; // @[Xbar.scala:355:40] assign out_1_a_valid = portsAOI_filtered_1_valid; // @[Xbar.scala:216:19, :352:24] assign out_1_a_bits_opcode = portsAOI_filtered_1_bits_opcode; // @[Xbar.scala:216:19, :352:24] assign out_1_a_bits_param = portsAOI_filtered_1_bits_param; // @[Xbar.scala:216:19, :352:24] assign out_1_a_bits_size = portsAOI_filtered_1_bits_size; // @[Xbar.scala:216:19, :352:24] assign out_1_a_bits_source = portsAOI_filtered_1_bits_source; // @[Xbar.scala:216:19, :352:24] assign out_1_a_bits_address = portsAOI_filtered_1_bits_address; // @[Xbar.scala:216:19, :352:24] assign out_1_a_bits_mask = portsAOI_filtered_1_bits_mask; // @[Xbar.scala:216:19, :352:24] assign out_1_a_bits_data = portsAOI_filtered_1_bits_data; // @[Xbar.scala:216:19, :352:24] assign out_1_a_bits_corrupt = portsAOI_filtered_1_bits_corrupt; // @[Xbar.scala:216:19, :352:24] wire _portsAOI_filtered_2_valid_T_1; // @[Xbar.scala:355:40] assign out_2_a_valid = portsAOI_filtered_2_valid; // @[Xbar.scala:216:19, :352:24] assign out_2_a_bits_opcode = portsAOI_filtered_2_bits_opcode; // @[Xbar.scala:216:19, :352:24] assign out_2_a_bits_param = portsAOI_filtered_2_bits_param; // @[Xbar.scala:216:19, :352:24] assign out_2_a_bits_size = portsAOI_filtered_2_bits_size; // @[Xbar.scala:216:19, :352:24] assign out_2_a_bits_source = portsAOI_filtered_2_bits_source; // @[Xbar.scala:216:19, :352:24] assign out_2_a_bits_address = portsAOI_filtered_2_bits_address; // @[Xbar.scala:216:19, :352:24] assign out_2_a_bits_mask = portsAOI_filtered_2_bits_mask; // @[Xbar.scala:216:19, :352:24] assign out_2_a_bits_data = portsAOI_filtered_2_bits_data; // @[Xbar.scala:216:19, :352:24] assign out_2_a_bits_corrupt = portsAOI_filtered_2_bits_corrupt; // @[Xbar.scala:216:19, :352:24] assign _portsAOI_filtered_0_valid_T_1 = in_0_a_valid & _portsAOI_filtered_0_valid_T; // @[Xbar.scala:159:18, :355:{40,54}] assign portsAOI_filtered_0_valid = _portsAOI_filtered_0_valid_T_1; // @[Xbar.scala:352:24, :355:40] assign _portsAOI_filtered_1_valid_T_1 = in_0_a_valid & _portsAOI_filtered_1_valid_T; // @[Xbar.scala:159:18, :355:{40,54}] assign portsAOI_filtered_1_valid = _portsAOI_filtered_1_valid_T_1; // @[Xbar.scala:352:24, :355:40] assign _portsAOI_filtered_2_valid_T_1 = in_0_a_valid & _portsAOI_filtered_2_valid_T; // @[Xbar.scala:159:18, :355:{40,54}] assign portsAOI_filtered_2_valid = _portsAOI_filtered_2_valid_T_1; // @[Xbar.scala:352:24, :355:40] wire _portsAOI_in_0_a_ready_T = requestAIO_0_0 & portsAOI_filtered_0_ready; // @[Mux.scala:30:73] wire _portsAOI_in_0_a_ready_T_1 = 
requestAIO_0_1 & portsAOI_filtered_1_ready; // @[Mux.scala:30:73] wire _portsAOI_in_0_a_ready_T_2 = requestAIO_0_2 & portsAOI_filtered_2_ready; // @[Mux.scala:30:73] wire _portsAOI_in_0_a_ready_T_3 = _portsAOI_in_0_a_ready_T | _portsAOI_in_0_a_ready_T_1; // @[Mux.scala:30:73] wire _portsAOI_in_0_a_ready_T_4 = _portsAOI_in_0_a_ready_T_3 | _portsAOI_in_0_a_ready_T_2; // @[Mux.scala:30:73] assign _portsAOI_in_0_a_ready_WIRE = _portsAOI_in_0_a_ready_T_4; // @[Mux.scala:30:73] assign in_0_a_ready = _portsAOI_in_0_a_ready_WIRE; // @[Mux.scala:30:73] wire _filtered_0_ready_T; // @[Arbiter.scala:94:31] assign out_0_d_ready = portsDIO_filtered_0_ready; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_0_valid; // @[Xbar.scala:352:24] assign portsDIO_filtered_0_valid = _portsDIO_filtered_0_valid_T_1; // @[Xbar.scala:352:24, :355:40] wire _filtered_0_ready_T_1; // @[Arbiter.scala:94:31] assign out_1_d_ready = portsDIO_filtered_1_0_ready; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_1_0_valid; // @[Xbar.scala:352:24] assign portsDIO_filtered_1_0_valid = _portsDIO_filtered_0_valid_T_3; // @[Xbar.scala:352:24, :355:40] wire _filtered_0_ready_T_2; // @[Arbiter.scala:94:31] assign out_2_d_ready = portsDIO_filtered_2_0_ready; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_2_0_valid; // @[Xbar.scala:352:24] assign portsDIO_filtered_2_0_valid = _portsDIO_filtered_0_valid_T_5; // @[Xbar.scala:352:24, :355:40] reg [2:0] beatsLeft; // @[Arbiter.scala:60:30] wire idle = beatsLeft == 3'h0; // @[Arbiter.scala:60:30, :61:28] wire latch = idle & in_0_d_ready; // @[Xbar.scala:159:18] wire [1:0] readys_hi = {portsDIO_filtered_2_0_valid, portsDIO_filtered_1_0_valid}; // @[Xbar.scala:352:24] wire [2:0] _readys_T = {readys_hi, portsDIO_filtered_0_valid}; // @[Xbar.scala:352:24] wire [2:0] readys_valid = _readys_T; // @[Arbiter.scala:21:23, :68:51] wire _readys_T_1 = readys_valid == _readys_T; // @[Arbiter.scala:21:23, :22:19, :68:51] wire _readys_T_3 = ~_readys_T_2; // @[Arbiter.scala:22:12] wire _readys_T_4 = ~_readys_T_1; // @[Arbiter.scala:22:{12,19}] reg [2:0] readys_mask; // @[Arbiter.scala:23:23] wire [2:0] _readys_filter_T = ~readys_mask; // @[Arbiter.scala:23:23, :24:30] wire [2:0] _readys_filter_T_1 = readys_valid & _readys_filter_T; // @[Arbiter.scala:21:23, :24:{28,30}] wire [5:0] readys_filter = {_readys_filter_T_1, readys_valid}; // @[Arbiter.scala:21:23, :24:{21,28}] wire [4:0] _readys_unready_T = readys_filter[5:1]; // @[package.scala:262:48] wire [5:0] _readys_unready_T_1 = {readys_filter[5], readys_filter[4:0] | _readys_unready_T}; // @[package.scala:262:{43,48}] wire [3:0] _readys_unready_T_2 = _readys_unready_T_1[5:2]; // @[package.scala:262:{43,48}] wire [5:0] _readys_unready_T_3 = {_readys_unready_T_1[5:4], _readys_unready_T_1[3:0] | _readys_unready_T_2}; // @[package.scala:262:{43,48}] wire [5:0] _readys_unready_T_4 = _readys_unready_T_3; // @[package.scala:262:43, :263:17] wire [4:0] _readys_unready_T_5 = _readys_unready_T_4[5:1]; // @[package.scala:263:17] wire [5:0] _readys_unready_T_6 = {readys_mask, 3'h0}; // @[Arbiter.scala:23:23, :25:66] wire [5:0] readys_unready = {1'h0, _readys_unready_T_5} | _readys_unready_T_6; // @[Arbiter.scala:25:{52,58,66}] wire [2:0] _readys_readys_T = readys_unready[5:3]; // @[Arbiter.scala:25:58, :26:29] wire [2:0] _readys_readys_T_1 = readys_unready[2:0]; // @[Arbiter.scala:25:58, :26:48] wire [2:0] _readys_readys_T_2 = _readys_readys_T & _readys_readys_T_1; // @[Arbiter.scala:26:{29,39,48}] wire [2:0] readys_readys = ~_readys_readys_T_2; 
// @[Arbiter.scala:26:{18,39}] wire [2:0] _readys_T_7 = readys_readys; // @[Arbiter.scala:26:18, :30:11] wire _readys_T_5 = |readys_valid; // @[Arbiter.scala:21:23, :27:27] wire _readys_T_6 = latch & _readys_T_5; // @[Arbiter.scala:27:{18,27}, :62:24] wire [2:0] _readys_mask_T = readys_readys & readys_valid; // @[Arbiter.scala:21:23, :26:18, :28:29] wire [3:0] _readys_mask_T_1 = {_readys_mask_T, 1'h0}; // @[package.scala:253:48] wire [2:0] _readys_mask_T_2 = _readys_mask_T_1[2:0]; // @[package.scala:253:{48,53}] wire [2:0] _readys_mask_T_3 = _readys_mask_T | _readys_mask_T_2; // @[package.scala:253:{43,53}] wire [4:0] _readys_mask_T_4 = {_readys_mask_T_3, 2'h0}; // @[package.scala:253:{43,48}] wire [2:0] _readys_mask_T_5 = _readys_mask_T_4[2:0]; // @[package.scala:253:{48,53}] wire [2:0] _readys_mask_T_6 = _readys_mask_T_3 | _readys_mask_T_5; // @[package.scala:253:{43,53}] wire [2:0] _readys_mask_T_7 = _readys_mask_T_6; // @[package.scala:253:43, :254:17] wire _readys_T_8 = _readys_T_7[0]; // @[Arbiter.scala:30:11, :68:76] wire readys_0 = _readys_T_8; // @[Arbiter.scala:68:{27,76}] wire _readys_T_9 = _readys_T_7[1]; // @[Arbiter.scala:30:11, :68:76] wire readys_1 = _readys_T_9; // @[Arbiter.scala:68:{27,76}] wire _readys_T_10 = _readys_T_7[2]; // @[Arbiter.scala:30:11, :68:76] wire readys_2 = _readys_T_10; // @[Arbiter.scala:68:{27,76}] wire _winner_T = readys_0 & portsDIO_filtered_0_valid; // @[Xbar.scala:352:24] wire winner_0 = _winner_T; // @[Arbiter.scala:71:{27,69}] wire _winner_T_1 = readys_1 & portsDIO_filtered_1_0_valid; // @[Xbar.scala:352:24] wire winner_1 = _winner_T_1; // @[Arbiter.scala:71:{27,69}] wire _winner_T_2 = readys_2 & portsDIO_filtered_2_0_valid; // @[Xbar.scala:352:24] wire winner_2 = _winner_T_2; // @[Arbiter.scala:71:{27,69}] wire prefixOR_1 = winner_0; // @[Arbiter.scala:71:27, :76:48] wire prefixOR_2 = prefixOR_1 | winner_1; // @[Arbiter.scala:71:27, :76:48] wire _prefixOR_T = prefixOR_2 | winner_2; // @[Arbiter.scala:71:27, :76:48] wire _in_0_d_valid_T = portsDIO_filtered_0_valid | portsDIO_filtered_1_0_valid; // @[Xbar.scala:352:24]
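The readys_mask / readys_filter / readys_unready wires above are the lowered form of a round-robin pick: the valid vector is concatenated with a copy masked by the previous grant, right-OR filled (the package.scala:262 annotations), and the two halves are compared. Below is a minimal plain-Scala model of that computation, assuming the three D-channel requesters of this crossbar; the object name, the driver, and the example valid/mask values are illustrative only and not part of the generated design.

object RoundRobinReadySketch extends App {
  val n = 3                                            // three requesters, as in the Verilog above
  def rightOR(x: Int, width: Int, cap: Int): Int = {   // fill 1s from high bits toward low bits
    val stop = math.min(width, cap)
    var r = x
    var s = 1
    while (s < stop) { r |= r >> s; s += s }
    r & ((1 << width) - 1)
  }
  // mask holds the previous winner with all higher bits set (leftOR of the last grant)
  def readys(valid: Int, mask: Int): Int = {
    val filter  = ((valid & ~mask & ((1 << n) - 1)) << n) | valid   // cf. readys_filter
    val unready = (rightOR(filter, 2 * n, n) >> 1) | (mask << n)    // cf. readys_unready
    ~((unready >> n) & unready) & ((1 << n) - 1)                    // cf. readys_readys
  }
  // Ports 0 and 2 request; port 2 won last time (mask = 0b100), so port 0 is picked next.
  val r = readys(valid = 0x5, mask = 0x4)
  println(r.toBinaryString)          // prints "11": ports 0 and 1 are ready this round
  println((r & 0x5).toBinaryString)  // prints "1": the winner one-hot is port 0
}

With valid = 0b101 and the mask remembering port 2 as the previous winner, the ready vector comes out 0b011 and the winner reduces to port 0, which is the expected round-robin rotation.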
Generate the Verilog code corresponding to the following Chisel files. File ListBuffer.scala: /* * Copyright 2019 SiFive, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You should have received a copy of LICENSE.Apache2 along with * this software. If not, you may obtain a copy at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package sifive.blocks.inclusivecache import chisel3._ import chisel3.util._ import freechips.rocketchip.util._ case class ListBufferParameters[T <: Data](gen: T, queues: Int, entries: Int, bypass: Boolean) { val queueBits = log2Up(queues) val entryBits = log2Up(entries) } class ListBufferPush[T <: Data](params: ListBufferParameters[T]) extends Bundle { val index = UInt(params.queueBits.W) val data = Output(params.gen) } class ListBuffer[T <: Data](params: ListBufferParameters[T]) extends Module { override def desiredName = s"ListBuffer_${params.gen.typeName}_q${params.queues}_e${params.entries}" val io = IO(new Bundle { // push is visible on the same cycle; flow queues val push = Flipped(Decoupled(new ListBufferPush(params))) val valid = UInt(params.queues.W) val pop = Flipped(Valid(UInt(params.queueBits.W))) val data = Output(params.gen) }) val valid = RegInit(0.U(params.queues.W)) val head = Mem(params.queues, UInt(params.entryBits.W)) val tail = Mem(params.queues, UInt(params.entryBits.W)) val used = RegInit(0.U(params.entries.W)) val next = Mem(params.entries, UInt(params.entryBits.W)) val data = Mem(params.entries, params.gen) val freeOH = ~(leftOR(~used) << 1) & ~used val freeIdx = OHToUInt(freeOH) val valid_set = WireDefault(0.U(params.queues.W)) val valid_clr = WireDefault(0.U(params.queues.W)) val used_set = WireDefault(0.U(params.entries.W)) val used_clr = WireDefault(0.U(params.entries.W)) val push_tail = tail.read(io.push.bits.index) val push_valid = valid(io.push.bits.index) io.push.ready := !used.andR when (io.push.fire) { valid_set := UIntToOH(io.push.bits.index, params.queues) used_set := freeOH data.write(freeIdx, io.push.bits.data) when (push_valid) { next.write(push_tail, freeIdx) } .otherwise { head.write(io.push.bits.index, freeIdx) } tail.write(io.push.bits.index, freeIdx) } val pop_head = head.read(io.pop.bits) val pop_valid = valid(io.pop.bits) // Bypass push data to the peek port io.data := (if (!params.bypass) data.read(pop_head) else Mux(!pop_valid, io.push.bits.data, data.read(pop_head))) io.valid := (if (!params.bypass) valid else (valid | valid_set)) // It is an error to pop something that is not valid assert (!io.pop.fire || (io.valid)(io.pop.bits)) when (io.pop.fire) { used_clr := UIntToOH(pop_head, params.entries) when (pop_head === tail.read(io.pop.bits)) { valid_clr := UIntToOH(io.pop.bits, params.queues) } head.write(io.pop.bits, Mux(io.push.fire && push_valid && push_tail === pop_head, freeIdx, next.read(pop_head))) } // Empty bypass changes no state when ((!params.bypass).B || !io.pop.valid || pop_valid) { used := (used & ~used_clr) | used_set valid := (valid & ~valid_clr) | valid_set } }
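The allocation in ListBuffer.scala hinges on one line, val freeOH = ~(leftOR(~used) << 1) & ~used, which turns the lowest clear bit of used into a one-hot allocation grant; the _freeOH_T_* chain in the module below is its lowered form. Here is a small plain-Scala model with a worked example, assuming the 8-entry configuration of this instance; the object name and the sample used value are made up for illustration.

object FreeListSketch extends App {
  val entries = 8
  def leftOR(x: Int): Int = {                  // fill 1s upward from each set bit
    var r = x
    var s = 1
    while (s < entries) { r = (r | (r << s)) & ((1 << entries) - 1); s += s }
    r
  }
  def freeOH(used: Int): Int = {
    val free = ~used & ((1 << entries) - 1)    // entries that are still unallocated
    ~(leftOR(free) << 1) & free                // keep only the lowest free bit
  }
  // used = 0b0001_0111: entries 0, 1, 2 and 4 are occupied, so entry 3 is handed out next.
  val oh = freeOH(0x17)
  println(oh.toBinaryString)                   // prints "1000": one-hot grant for entry 3
  println(Integer.numberOfTrailingZeros(oh))   // prints 3, the freeIdx that OHToUInt recovers
}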
module ListBuffer_PutBufferCEntry_q2_e8_7( // @[ListBuffer.scala:36:7] input clock, // @[ListBuffer.scala:36:7] input reset, // @[ListBuffer.scala:36:7] output io_push_ready, // @[ListBuffer.scala:39:14] input io_push_valid, // @[ListBuffer.scala:39:14] input io_push_bits_index, // @[ListBuffer.scala:39:14] input [127:0] io_push_bits_data_data, // @[ListBuffer.scala:39:14] input io_push_bits_data_corrupt, // @[ListBuffer.scala:39:14] output [1:0] io_valid, // @[ListBuffer.scala:39:14] input io_pop_valid, // @[ListBuffer.scala:39:14] input io_pop_bits, // @[ListBuffer.scala:39:14] output [127:0] io_data_data, // @[ListBuffer.scala:39:14] output io_data_corrupt // @[ListBuffer.scala:39:14] ); wire [128:0] _data_ext_R0_data; // @[ListBuffer.scala:52:18] wire [2:0] _next_ext_R0_data; // @[ListBuffer.scala:51:18] wire [2:0] _tail_ext_R0_data; // @[ListBuffer.scala:49:18] wire [2:0] _tail_ext_R1_data; // @[ListBuffer.scala:49:18] wire [2:0] _head_ext_R0_data; // @[ListBuffer.scala:48:18] wire io_push_valid_0 = io_push_valid; // @[ListBuffer.scala:36:7] wire io_push_bits_index_0 = io_push_bits_index; // @[ListBuffer.scala:36:7] wire [127:0] io_push_bits_data_data_0 = io_push_bits_data_data; // @[ListBuffer.scala:36:7] wire io_push_bits_data_corrupt_0 = io_push_bits_data_corrupt; // @[ListBuffer.scala:36:7] wire io_pop_valid_0 = io_pop_valid; // @[ListBuffer.scala:36:7] wire io_pop_bits_0 = io_pop_bits; // @[ListBuffer.scala:36:7] wire _io_push_ready_T_1; // @[ListBuffer.scala:65:20] wire valid_set_shiftAmount = io_push_bits_index_0; // @[OneHot.scala:64:49] wire valid_clr_shiftAmount = io_pop_bits_0; // @[OneHot.scala:64:49] wire io_push_ready_0; // @[ListBuffer.scala:36:7] wire [127:0] io_data_data_0; // @[ListBuffer.scala:36:7] wire io_data_corrupt_0; // @[ListBuffer.scala:36:7] wire [1:0] io_valid_0; // @[ListBuffer.scala:36:7] reg [1:0] valid; // @[ListBuffer.scala:47:22] assign io_valid_0 = valid; // @[ListBuffer.scala:36:7, :47:22] reg [7:0] used; // @[ListBuffer.scala:50:22] assign io_data_data_0 = _data_ext_R0_data[127:0]; // @[ListBuffer.scala:36:7, :52:18] assign io_data_corrupt_0 = _data_ext_R0_data[128]; // @[ListBuffer.scala:36:7, :52:18] wire [7:0] _freeOH_T = ~used; // @[ListBuffer.scala:50:22, :54:25] wire [8:0] _freeOH_T_1 = {_freeOH_T, 1'h0}; // @[package.scala:253:48] wire [7:0] _freeOH_T_2 = _freeOH_T_1[7:0]; // @[package.scala:253:{48,53}] wire [7:0] _freeOH_T_3 = _freeOH_T | _freeOH_T_2; // @[package.scala:253:{43,53}] wire [9:0] _freeOH_T_4 = {_freeOH_T_3, 2'h0}; // @[package.scala:253:{43,48}] wire [7:0] _freeOH_T_5 = _freeOH_T_4[7:0]; // @[package.scala:253:{48,53}] wire [7:0] _freeOH_T_6 = _freeOH_T_3 | _freeOH_T_5; // @[package.scala:253:{43,53}] wire [11:0] _freeOH_T_7 = {_freeOH_T_6, 4'h0}; // @[package.scala:253:{43,48}] wire [7:0] _freeOH_T_8 = _freeOH_T_7[7:0]; // @[package.scala:253:{48,53}] wire [7:0] _freeOH_T_9 = _freeOH_T_6 | _freeOH_T_8; // @[package.scala:253:{43,53}] wire [7:0] _freeOH_T_10 = _freeOH_T_9; // @[package.scala:253:43, :254:17] wire [8:0] _freeOH_T_11 = {_freeOH_T_10, 1'h0}; // @[package.scala:254:17] wire [8:0] _freeOH_T_12 = ~_freeOH_T_11; // @[ListBuffer.scala:54:{16,32}] wire [7:0] _freeOH_T_13 = ~used; // @[ListBuffer.scala:50:22, :54:{25,40}] wire [8:0] freeOH = {1'h0, _freeOH_T_12[7:0] & _freeOH_T_13}; // @[ListBuffer.scala:54:{16,38,40}] wire freeIdx_hi = freeOH[8]; // @[OneHot.scala:30:18] wire _freeIdx_T = freeIdx_hi; // @[OneHot.scala:30:18, :32:14] wire [7:0] freeIdx_lo = freeOH[7:0]; // @[OneHot.scala:31:18] wire [7:0] _freeIdx_T_1 
= {7'h0, freeIdx_hi} | freeIdx_lo; // @[OneHot.scala:30:18, :31:18, :32:28] wire [3:0] freeIdx_hi_1 = _freeIdx_T_1[7:4]; // @[OneHot.scala:30:18, :32:28] wire [3:0] freeIdx_lo_1 = _freeIdx_T_1[3:0]; // @[OneHot.scala:31:18, :32:28] wire _freeIdx_T_2 = |freeIdx_hi_1; // @[OneHot.scala:30:18, :32:14] wire [3:0] _freeIdx_T_3 = freeIdx_hi_1 | freeIdx_lo_1; // @[OneHot.scala:30:18, :31:18, :32:28] wire [1:0] freeIdx_hi_2 = _freeIdx_T_3[3:2]; // @[OneHot.scala:30:18, :32:28] wire [1:0] freeIdx_lo_2 = _freeIdx_T_3[1:0]; // @[OneHot.scala:31:18, :32:28] wire _freeIdx_T_4 = |freeIdx_hi_2; // @[OneHot.scala:30:18, :32:14] wire [1:0] _freeIdx_T_5 = freeIdx_hi_2 | freeIdx_lo_2; // @[OneHot.scala:30:18, :31:18, :32:28] wire _freeIdx_T_6 = _freeIdx_T_5[1]; // @[OneHot.scala:32:28] wire [1:0] _freeIdx_T_7 = {_freeIdx_T_4, _freeIdx_T_6}; // @[OneHot.scala:32:{10,14}] wire [2:0] _freeIdx_T_8 = {_freeIdx_T_2, _freeIdx_T_7}; // @[OneHot.scala:32:{10,14}] wire [3:0] freeIdx = {_freeIdx_T, _freeIdx_T_8}; // @[OneHot.scala:32:{10,14}] wire [1:0] valid_set; // @[ListBuffer.scala:57:30] wire [1:0] valid_clr; // @[ListBuffer.scala:58:30] wire [7:0] used_set; // @[ListBuffer.scala:59:30] wire [7:0] used_clr; // @[ListBuffer.scala:60:30] wire [1:0] _push_valid_T = valid >> io_push_bits_index_0; // @[ListBuffer.scala:36:7, :47:22, :63:25] wire push_valid = _push_valid_T[0]; // @[ListBuffer.scala:63:25] wire _io_push_ready_T = &used; // @[ListBuffer.scala:50:22, :65:26] assign _io_push_ready_T_1 = ~_io_push_ready_T; // @[ListBuffer.scala:65:{20,26}] assign io_push_ready_0 = _io_push_ready_T_1; // @[ListBuffer.scala:36:7, :65:20] wire data_MPORT_en = io_push_ready_0 & io_push_valid_0; // @[Decoupled.scala:51:35] wire [1:0] _valid_set_T = 2'h1 << valid_set_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [1:0] _valid_set_T_1 = _valid_set_T; // @[OneHot.scala:65:{12,27}] assign valid_set = data_MPORT_en ? _valid_set_T_1 : 2'h0; // @[OneHot.scala:65:27] assign used_set = data_MPORT_en ? freeIdx_lo : 8'h0; // @[OneHot.scala:31:18] wire [1:0] _GEN = {1'h0, io_pop_bits_0}; // @[ListBuffer.scala:36:7, :79:24] wire [1:0] _pop_valid_T = valid >> _GEN; // @[ListBuffer.scala:47:22, :79:24] wire pop_valid = _pop_valid_T[0]; // @[ListBuffer.scala:79:24]
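The freeIdx_hi / freeIdx_lo / _freeIdx_T_* wires above are the lowered OHToUInt from OneHot.scala: the one-hot free grant is converted back to a binary index by recursive halving, where each index bit records whether the set bit lies in the upper half and the two halves are OR-ed before recursing. A plain-Scala sketch of that encoding follows; it is an illustration of the technique rather than the library code, and the object name and test values are assumptions.

object OHToUIntSketch extends App {
  def ohToUInt(oh: Int, width: Int): Int =
    if (width <= 1) 0
    else {
      val half = (width + 1) / 2
      val hi   = oh >>> half                   // upper half of the one-hot value
      val lo   = oh & ((1 << half) - 1)        // lower half
      (if (hi != 0) half else 0) + ohToUInt(hi | lo, half)
    }
  // The ListBuffer instance above allocates entry 3, i.e. one-hot 0b0000_1000.
  println(ohToUInt(0x08, 8))                   // prints 3
  println(ohToUInt(0x80, 8))                   // prints 7
}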
Generate the Verilog code corresponding to the following Chisel files. File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def 
validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File package.scala: // See LICENSE.SiFive for license details. 
package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. 
*/ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class 
OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Metadata.scala: // See LICENSE.SiFive for license details. // See LICENSE.Berkeley for license details. 
package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import freechips.rocketchip.rocket.constants.MemoryOpConstants import freechips.rocketchip.util._ object ClientStates { val width = 2 def Nothing = 0.U(width.W) def Branch = 1.U(width.W) def Trunk = 2.U(width.W) def Dirty = 3.U(width.W) def hasReadPermission(state: UInt): Bool = state > Nothing def hasWritePermission(state: UInt): Bool = state > Branch } object MemoryOpCategories extends MemoryOpConstants { def wr = Cat(true.B, true.B) // Op actually writes def wi = Cat(false.B, true.B) // Future op will write def rd = Cat(false.B, false.B) // Op only reads def categorize(cmd: UInt): UInt = { val cat = Cat(isWrite(cmd), isWriteIntent(cmd)) //assert(cat.isOneOf(wr,wi,rd), "Could not categorize command.") cat } } /** Stores the client-side coherence information, * such as permissions on the data and whether the data is dirty. * Its API can be used to make TileLink messages in response to * memory operations, cache control oeprations, or Probe messages. */ class ClientMetadata extends Bundle { /** Actual state information stored in this bundle */ val state = UInt(ClientStates.width.W) /** Metadata equality */ def ===(rhs: UInt): Bool = state === rhs def ===(rhs: ClientMetadata): Bool = state === rhs.state def =/=(rhs: ClientMetadata): Bool = !this.===(rhs) /** Is the block's data present in this cache */ def isValid(dummy: Int = 0): Bool = state > ClientStates.Nothing /** Determine whether this cmd misses, and the new state (on hit) or param to be sent (on miss) */ private def growStarter(cmd: UInt): (Bool, UInt) = { import MemoryOpCategories._ import TLPermissions._ import ClientStates._ val c = categorize(cmd) MuxTLookup(Cat(c, state), (false.B, 0.U), Seq( //(effect, am now) -> (was a hit, next) Cat(rd, Dirty) -> (true.B, Dirty), Cat(rd, Trunk) -> (true.B, Trunk), Cat(rd, Branch) -> (true.B, Branch), Cat(wi, Dirty) -> (true.B, Dirty), Cat(wi, Trunk) -> (true.B, Trunk), Cat(wr, Dirty) -> (true.B, Dirty), Cat(wr, Trunk) -> (true.B, Dirty), //(effect, am now) -> (was a miss, param) Cat(rd, Nothing) -> (false.B, NtoB), Cat(wi, Branch) -> (false.B, BtoT), Cat(wi, Nothing) -> (false.B, NtoT), Cat(wr, Branch) -> (false.B, BtoT), Cat(wr, Nothing) -> (false.B, NtoT))) } /** Determine what state to go to after miss based on Grant param * For now, doesn't depend on state (which may have been Probed). */ private def growFinisher(cmd: UInt, param: UInt): UInt = { import MemoryOpCategories._ import TLPermissions._ import ClientStates._ val c = categorize(cmd) //assert(c === rd || param === toT, "Client was expecting trunk permissions.") MuxLookup(Cat(c, param), Nothing)(Seq( //(effect param) -> (next) Cat(rd, toB) -> Branch, Cat(rd, toT) -> Trunk, Cat(wi, toT) -> Trunk, Cat(wr, toT) -> Dirty)) } /** Does this cache have permissions on this block sufficient to perform op, * and what to do next (Acquire message param or updated metadata). 
*/ def onAccess(cmd: UInt): (Bool, UInt, ClientMetadata) = { val r = growStarter(cmd) (r._1, r._2, ClientMetadata(r._2)) } /** Does a secondary miss on the block require another Acquire message */ def onSecondaryAccess(first_cmd: UInt, second_cmd: UInt): (Bool, Bool, UInt, ClientMetadata, UInt) = { import MemoryOpCategories._ val r1 = growStarter(first_cmd) val r2 = growStarter(second_cmd) val needs_second_acq = isWriteIntent(second_cmd) && !isWriteIntent(first_cmd) val hit_again = r1._1 && r2._1 val dirties = categorize(second_cmd) === wr val biggest_grow_param = Mux(dirties, r2._2, r1._2) val dirtiest_state = ClientMetadata(biggest_grow_param) val dirtiest_cmd = Mux(dirties, second_cmd, first_cmd) (needs_second_acq, hit_again, biggest_grow_param, dirtiest_state, dirtiest_cmd) } /** Metadata change on a returned Grant */ def onGrant(cmd: UInt, param: UInt): ClientMetadata = ClientMetadata(growFinisher(cmd, param)) /** Determine what state to go to based on Probe param */ private def shrinkHelper(param: UInt): (Bool, UInt, UInt) = { import ClientStates._ import TLPermissions._ MuxTLookup(Cat(param, state), (false.B, 0.U, 0.U), Seq( //(wanted, am now) -> (hasDirtyData resp, next) Cat(toT, Dirty) -> (true.B, TtoT, Trunk), Cat(toT, Trunk) -> (false.B, TtoT, Trunk), Cat(toT, Branch) -> (false.B, BtoB, Branch), Cat(toT, Nothing) -> (false.B, NtoN, Nothing), Cat(toB, Dirty) -> (true.B, TtoB, Branch), Cat(toB, Trunk) -> (false.B, TtoB, Branch), // Policy: Don't notify on clean downgrade Cat(toB, Branch) -> (false.B, BtoB, Branch), Cat(toB, Nothing) -> (false.B, NtoN, Nothing), Cat(toN, Dirty) -> (true.B, TtoN, Nothing), Cat(toN, Trunk) -> (false.B, TtoN, Nothing), // Policy: Don't notify on clean downgrade Cat(toN, Branch) -> (false.B, BtoN, Nothing), // Policy: Don't notify on clean downgrade Cat(toN, Nothing) -> (false.B, NtoN, Nothing))) } /** Translate cache control cmds into Probe param */ private def cmdToPermCap(cmd: UInt): UInt = { import MemoryOpCategories._ import TLPermissions._ MuxLookup(cmd, toN)(Seq( M_FLUSH -> toN, M_PRODUCE -> toB, M_CLEAN -> toT)) } def onCacheControl(cmd: UInt): (Bool, UInt, ClientMetadata) = { val r = shrinkHelper(cmdToPermCap(cmd)) (r._1, r._2, ClientMetadata(r._3)) } def onProbe(param: UInt): (Bool, UInt, ClientMetadata) = { val r = shrinkHelper(param) (r._1, r._2, ClientMetadata(r._3)) } } /** Factories for ClientMetadata, including on reset */ object ClientMetadata { def apply(perm: UInt) = { val meta = Wire(new ClientMetadata) meta.state := perm meta } def onReset = ClientMetadata(ClientStates.Nothing) def maximum = ClientMetadata(ClientStates.Dirty) } File NBDcache.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. 
package freechips.rocketchip.rocket import chisel3._ import chisel3.util._ import chisel3.experimental.dataview._ import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.tilelink._ import freechips.rocketchip.util._ trait HasMissInfo extends Bundle with HasL1HellaCacheParameters { val tag_match = Bool() val old_meta = new L1Metadata val way_en = Bits(nWays.W) } class L1DataReadReq(implicit p: Parameters) extends L1HellaCacheBundle()(p) { val way_en = Bits(nWays.W) val addr = Bits(untagBits.W) } class L1DataWriteReq(implicit p: Parameters) extends L1DataReadReq()(p) { val wmask = Bits(rowWords.W) val data = Bits(encRowBits.W) } class L1RefillReq(implicit p: Parameters) extends L1DataReadReq()(p) class Replay(implicit p: Parameters) extends HellaCacheReqInternal()(p) with HasCoreData class ReplayInternal(implicit p: Parameters) extends HellaCacheReqInternal()(p) with HasL1HellaCacheParameters { val sdq_id = UInt(log2Up(cfg.nSDQ).W) } class MSHRReq(implicit p: Parameters) extends Replay()(p) with HasMissInfo class MSHRReqInternal(implicit p: Parameters) extends ReplayInternal()(p) with HasMissInfo class WritebackReq(params: TLBundleParameters)(implicit p: Parameters) extends L1HellaCacheBundle()(p) { val tag = Bits(tagBits.W) val idx = Bits(idxBits.W) val source = UInt(params.sourceBits.W) val param = UInt(TLPermissions.cWidth.W) val way_en = Bits(nWays.W) val voluntary = Bool() } class IOMSHR(id: Int)(implicit edge: TLEdgeOut, p: Parameters) extends L1HellaCacheModule()(p) { val io = IO(new Bundle { val req = Flipped(Decoupled(new HellaCacheReq)) val resp = Decoupled(new HellaCacheResp) val mem_access = Decoupled(new TLBundleA(edge.bundle)) val mem_ack = Flipped(Valid(new TLBundleD(edge.bundle))) val replay_next = Output(Bool()) val store_pending = Output(Bool()) }) def beatOffset(addr: UInt) = addr.extract(beatOffBits - 1, wordOffBits) def wordFromBeat(addr: UInt, dat: UInt) = { val shift = Cat(beatOffset(addr), 0.U((wordOffBits + log2Up(wordBytes)).W)) (dat >> shift)(wordBits - 1, 0) } val req = Reg(new HellaCacheReq) val grant_word = Reg(UInt(wordBits.W)) val s_idle :: s_mem_access :: s_mem_ack :: s_resp_1 :: s_resp_2 :: Nil = Enum(5) val state = RegInit(s_idle) io.req.ready := (state === s_idle) val loadgen = new LoadGen(req.size, req.signed, req.addr, grant_word, false.B, wordBytes) val a_source = id.U val a_address = req.addr val a_size = req.size val a_data = Fill(beatWords, req.data) val get = edge.Get(a_source, a_address, a_size)._2 val put = edge.Put(a_source, a_address, a_size, a_data)._2 val atomics = if (edge.manager.anySupportLogical) { MuxLookup(req.cmd, (0.U).asTypeOf(new TLBundleA(edge.bundle)))(Array( M_XA_SWAP -> edge.Logical(a_source, a_address, a_size, a_data, TLAtomics.SWAP)._2, M_XA_XOR -> edge.Logical(a_source, a_address, a_size, a_data, TLAtomics.XOR) ._2, M_XA_OR -> edge.Logical(a_source, a_address, a_size, a_data, TLAtomics.OR) ._2, M_XA_AND -> edge.Logical(a_source, a_address, a_size, a_data, TLAtomics.AND) ._2, M_XA_ADD -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.ADD)._2, M_XA_MIN -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.MIN)._2, M_XA_MAX -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.MAX)._2, M_XA_MINU -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.MINU)._2, M_XA_MAXU -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.MAXU)._2)) } else { // If no managers support atomics, assert fail if processor asks for them assert(state === s_idle || 
!isAMO(req.cmd)) (0.U).asTypeOf(new TLBundleA(edge.bundle)) } assert(state === s_idle || req.cmd =/= M_XSC) io.mem_access.valid := (state === s_mem_access) io.mem_access.bits := Mux(isAMO(req.cmd), atomics, Mux(isRead(req.cmd), get, put)) io.replay_next := state === s_resp_1 || (state === s_resp_2 && !io.resp.ready) io.resp.valid := state === s_resp_2 io.resp.bits.addr := req.addr io.resp.bits.idx.foreach(_ := req.idx.get) io.resp.bits.tag := req.tag io.resp.bits.cmd := req.cmd io.resp.bits.size := req.size io.resp.bits.signed := req.signed io.resp.bits.dprv := req.dprv io.resp.bits.dv := req.dv io.resp.bits.mask := req.mask io.resp.bits.has_data := isRead(req.cmd) io.resp.bits.data := loadgen.data io.resp.bits.data_raw := grant_word io.resp.bits.data_word_bypass := loadgen.wordData io.resp.bits.store_data := req.data io.resp.bits.replay := true.B io.store_pending := state =/= s_idle && isWrite(req.cmd) when (io.req.fire) { req := io.req.bits state := s_mem_access } when (io.mem_access.fire) { state := s_mem_ack } when (state === s_mem_ack && io.mem_ack.valid) { state := Mux(req.no_resp || !isRead(req.cmd), s_idle, s_resp_1) when (isRead(req.cmd)) { grant_word := wordFromBeat(req.addr, io.mem_ack.bits.data) } } when (state === s_resp_1) { state := s_resp_2 } when (io.resp.fire) { state := s_idle } } class MSHR(id: Int)(implicit edge: TLEdgeOut, p: Parameters) extends L1HellaCacheModule()(p) { val io = IO(new Bundle { val req_pri_val = Input(Bool()) val req_pri_rdy = Output(Bool()) val req_sec_val = Input(Bool()) val req_sec_rdy = Output(Bool()) val req_bits = Input(new MSHRReqInternal()) val idx_match = Output(Bool()) val tag = Output(Bits(tagBits.W)) val mem_acquire = Decoupled(new TLBundleA(edge.bundle)) val mem_grant = Flipped(Valid(new TLBundleD(edge.bundle))) val mem_finish = Decoupled(new TLBundleE(edge.bundle)) val refill = Output(new L1RefillReq()) // Data is bypassed val meta_read = Decoupled(new L1MetaReadReq) val meta_write = Decoupled(new L1MetaWriteReq) val replay = Decoupled(new ReplayInternal) val wb_req = Decoupled(new WritebackReq(edge.bundle)) val probe_rdy = Output(Bool()) }) val s_invalid :: s_wb_req :: s_wb_resp :: s_meta_clear :: s_refill_req :: s_refill_resp :: s_meta_write_req :: s_meta_write_resp :: s_drain_rpq :: Nil = Enum(9) val state = RegInit(s_invalid) val req = Reg(new MSHRReqInternal) val req_idx = req.addr(untagBits-1,blockOffBits) val req_tag = req.addr >> untagBits val req_block_addr = (req.addr >> blockOffBits) << blockOffBits val idx_match = req_idx === io.req_bits.addr(untagBits-1,blockOffBits) val new_coh = RegInit(ClientMetadata.onReset) val (_, shrink_param, coh_on_clear) = req.old_meta.coh.onCacheControl(M_FLUSH) val grow_param = new_coh.onAccess(req.cmd)._2 val coh_on_grant = new_coh.onGrant(req.cmd, io.mem_grant.bits.param) // We only accept secondary misses if we haven't yet sent an Acquire to outer memory // or if the Acquire that was sent will obtain a Grant with sufficient permissions // to let us replay this new request. I.e. we don't handle multiple outstanding // Acquires on the same block for now. 
val (cmd_requires_second_acquire, is_hit_again, _, dirtier_coh, dirtier_cmd) = new_coh.onSecondaryAccess(req.cmd, io.req_bits.cmd) val states_before_refill = Seq(s_wb_req, s_wb_resp, s_meta_clear) val (_, _, refill_done, refill_address_inc) = edge.addr_inc(io.mem_grant) val sec_rdy = idx_match && (state.isOneOf(states_before_refill) || (state.isOneOf(s_refill_req, s_refill_resp) && !cmd_requires_second_acquire && !refill_done)) val rpq = Module(new Queue(new ReplayInternal, cfg.nRPQ)) rpq.io.enq.valid := (io.req_pri_val && io.req_pri_rdy || io.req_sec_val && sec_rdy) && !isPrefetch(io.req_bits.cmd) rpq.io.enq.bits := io.req_bits rpq.io.deq.ready := (io.replay.ready && state === s_drain_rpq) || state === s_invalid val acked = Reg(Bool()) when (io.mem_grant.valid) { acked := true.B } when (state === s_drain_rpq && !rpq.io.deq.valid) { state := s_invalid } when (state === s_meta_write_resp) { // this wait state allows us to catch RAW hazards on the tags via nack_victim state := s_drain_rpq } when (state === s_meta_write_req && io.meta_write.ready) { state := s_meta_write_resp } when (state === s_refill_resp && refill_done) { new_coh := coh_on_grant state := s_meta_write_req } when (io.mem_acquire.fire) { // s_refill_req state := s_refill_resp } when (state === s_meta_clear && io.meta_write.ready) { state := s_refill_req } when (state === s_wb_resp && io.wb_req.ready && acked) { state := s_meta_clear } when (io.wb_req.fire) { // s_wb_req state := s_wb_resp } when (io.req_sec_val && io.req_sec_rdy) { // s_wb_req, s_wb_resp, s_refill_req //If we get a secondary miss that needs more permissions before we've sent // out the primary miss's Acquire, we can upgrade the permissions we're // going to ask for in s_refill_req req.cmd := dirtier_cmd when (is_hit_again) { new_coh := dirtier_coh } } when (io.req_pri_val && io.req_pri_rdy) { req := io.req_bits acked := false.B val old_coh = io.req_bits.old_meta.coh val needs_wb = old_coh.onCacheControl(M_FLUSH)._1 val (is_hit, _, coh_on_hit) = old_coh.onAccess(io.req_bits.cmd) when (io.req_bits.tag_match) { when (is_hit) { // set dirty bit new_coh := coh_on_hit state := s_meta_write_req }.otherwise { // upgrade permissions new_coh := old_coh state := s_refill_req } }.otherwise { // writback if necessary and refill new_coh := ClientMetadata.onReset state := Mux(needs_wb, s_wb_req, s_meta_clear) } } val grantackq = Module(new Queue(new TLBundleE(edge.bundle), 1)) val can_finish = state.isOneOf(s_invalid, s_refill_req) grantackq.io.enq.valid := refill_done && edge.isRequest(io.mem_grant.bits) grantackq.io.enq.bits := edge.GrantAck(io.mem_grant.bits) io.mem_finish.valid := grantackq.io.deq.valid && can_finish io.mem_finish.bits := grantackq.io.deq.bits grantackq.io.deq.ready := io.mem_finish.ready && can_finish io.idx_match := (state =/= s_invalid) && idx_match io.refill.way_en := req.way_en io.refill.addr := req_block_addr | refill_address_inc io.tag := req_tag io.req_pri_rdy := state === s_invalid io.req_sec_rdy := sec_rdy && rpq.io.enq.ready val meta_hazard = RegInit(0.U(2.W)) when (meta_hazard =/= 0.U) { meta_hazard := meta_hazard + 1.U } when (io.meta_write.fire) { meta_hazard := 1.U } io.probe_rdy := !idx_match || (!state.isOneOf(states_before_refill) && meta_hazard === 0.U) io.meta_write.valid := state.isOneOf(s_meta_write_req, s_meta_clear) io.meta_write.bits.idx := req_idx io.meta_write.bits.tag := io.tag io.meta_write.bits.data.coh := Mux(state === s_meta_clear, coh_on_clear, new_coh) io.meta_write.bits.data.tag := io.tag io.meta_write.bits.way_en := 
req.way_en io.wb_req.valid := state === s_wb_req io.wb_req.bits.source := id.U io.wb_req.bits.tag := req.old_meta.tag io.wb_req.bits.idx := req_idx io.wb_req.bits.param := shrink_param io.wb_req.bits.way_en := req.way_en io.wb_req.bits.voluntary := true.B io.mem_acquire.valid := state === s_refill_req && grantackq.io.enq.ready io.mem_acquire.bits := edge.AcquireBlock( fromSource = id.U, toAddress = Cat(io.tag, req_idx) << blockOffBits, lgSize = lgCacheBlockBytes.U, growPermissions = grow_param)._2 io.meta_read.valid := state === s_drain_rpq io.meta_read.bits.idx := req_idx io.meta_read.bits.tag := io.tag io.meta_read.bits.way_en := ~(0.U(nWays.W)) io.replay.valid := state === s_drain_rpq && rpq.io.deq.valid io.replay.bits := rpq.io.deq.bits io.replay.bits.phys := true.B io.replay.bits.addr := Cat(io.tag, req_idx, rpq.io.deq.bits.addr(blockOffBits-1,0)) when (!io.meta_read.ready) { rpq.io.deq.ready := false.B io.replay.bits.cmd := M_FLUSH_ALL /* nop */ } } class MSHRFile(implicit edge: TLEdgeOut, p: Parameters) extends L1HellaCacheModule()(p) { val io = IO(new Bundle { val req = Flipped(Decoupled(new MSHRReq)) val resp = Decoupled(new HellaCacheResp) val secondary_miss = Output(Bool()) val mem_acquire = Decoupled(new TLBundleA(edge.bundle)) val mem_grant = Flipped(Valid(new TLBundleD(edge.bundle))) val mem_finish = Decoupled(new TLBundleE(edge.bundle)) val refill = Output(new L1RefillReq()) val meta_read = Decoupled(new L1MetaReadReq) val meta_write = Decoupled(new L1MetaWriteReq) val replay = Decoupled(new Replay) val wb_req = Decoupled(new WritebackReq(edge.bundle)) val probe_rdy = Output(Bool()) val fence_rdy = Output(Bool()) val replay_next = Output(Bool()) val store_pending = Output(Bool()) }) // determine if the request is cacheable or not val cacheable = edge.manager.supportsAcquireBFast(io.req.bits.addr, lgCacheBlockBytes.U) val sdq_val = RegInit(0.U(cfg.nSDQ.W)) val sdq_alloc_id = PriorityEncoder(~sdq_val(cfg.nSDQ-1,0)) val sdq_rdy = !sdq_val.andR val sdq_enq = io.req.valid && io.req.ready && cacheable && isWrite(io.req.bits.cmd) val sdq = Mem(cfg.nSDQ, UInt(coreDataBits.W)) when (sdq_enq) { sdq(sdq_alloc_id) := io.req.bits.data } val idxMatch = Wire(Vec(cfg.nMSHRs, Bool())) val tagList = Wire(Vec(cfg.nMSHRs, Bits(tagBits.W))) val tag_match = Mux1H(idxMatch, tagList) === io.req.bits.addr >> untagBits val wbTagList = Wire(Vec(cfg.nMSHRs, Bits())) val refillMux = Wire(Vec(cfg.nMSHRs, new L1RefillReq)) val meta_read_arb = Module(new Arbiter(new L1MetaReadReq, cfg.nMSHRs)) val meta_write_arb = Module(new Arbiter(new L1MetaWriteReq, cfg.nMSHRs)) val wb_req_arb = Module(new Arbiter(new WritebackReq(edge.bundle), cfg.nMSHRs)) val replay_arb = Module(new Arbiter(new ReplayInternal, cfg.nMSHRs)) val alloc_arb = Module(new Arbiter(Bool(), cfg.nMSHRs)) alloc_arb.io.in.foreach(_.bits := DontCare) var idx_match = false.B var pri_rdy = false.B var sec_rdy = false.B io.fence_rdy := true.B io.probe_rdy := true.B val mshrs = (0 until cfg.nMSHRs) map { i => val mshr = Module(new MSHR(i)) idxMatch(i) := mshr.io.idx_match tagList(i) := mshr.io.tag wbTagList(i) := mshr.io.wb_req.bits.tag alloc_arb.io.in(i).valid := mshr.io.req_pri_rdy mshr.io.req_pri_val := alloc_arb.io.in(i).ready mshr.io.req_sec_val := io.req.valid && sdq_rdy && tag_match mshr.io.req_bits.viewAsSupertype(new HellaCacheReqInternal) := io.req.bits.viewAsSupertype(new HellaCacheReqInternal) mshr.io.req_bits.tag_match := io.req.bits.tag_match mshr.io.req_bits.old_meta := io.req.bits.old_meta mshr.io.req_bits.way_en := io.req.bits.way_en 
mshr.io.req_bits.sdq_id := sdq_alloc_id meta_read_arb.io.in(i) <> mshr.io.meta_read meta_write_arb.io.in(i) <> mshr.io.meta_write wb_req_arb.io.in(i) <> mshr.io.wb_req replay_arb.io.in(i) <> mshr.io.replay mshr.io.mem_grant.valid := io.mem_grant.valid && io.mem_grant.bits.source === i.U mshr.io.mem_grant.bits := io.mem_grant.bits refillMux(i) := mshr.io.refill pri_rdy = pri_rdy || mshr.io.req_pri_rdy sec_rdy = sec_rdy || mshr.io.req_sec_rdy idx_match = idx_match || mshr.io.idx_match when (!mshr.io.req_pri_rdy) { io.fence_rdy := false.B } when (!mshr.io.probe_rdy) { io.probe_rdy := false.B } mshr } alloc_arb.io.out.ready := io.req.valid && sdq_rdy && cacheable && !idx_match io.meta_read <> meta_read_arb.io.out io.meta_write <> meta_write_arb.io.out io.wb_req <> wb_req_arb.io.out val mmio_alloc_arb = Module(new Arbiter(Bool(), nIOMSHRs)) mmio_alloc_arb.io.in.foreach(_.bits := DontCare) val resp_arb = Module(new Arbiter(new HellaCacheResp, nIOMSHRs)) var mmio_rdy = false.B io.replay_next := false.B val mmios = (0 until nIOMSHRs) map { i => val id = cfg.nMSHRs + i val mshr = Module(new IOMSHR(id)) mmio_alloc_arb.io.in(i).valid := mshr.io.req.ready mshr.io.req.valid := mmio_alloc_arb.io.in(i).ready mshr.io.req.bits := io.req.bits mmio_rdy = mmio_rdy || mshr.io.req.ready mshr.io.mem_ack.bits := io.mem_grant.bits mshr.io.mem_ack.valid := io.mem_grant.valid && io.mem_grant.bits.source === id.U resp_arb.io.in(i) <> mshr.io.resp when (!mshr.io.req.ready) { io.fence_rdy := false.B } when (mshr.io.replay_next) { io.replay_next := true.B } mshr } mmio_alloc_arb.io.out.ready := io.req.valid && !cacheable TLArbiter.lowestFromSeq(edge, io.mem_acquire, mshrs.map(_.io.mem_acquire) ++ mmios.map(_.io.mem_access)) TLArbiter.lowestFromSeq(edge, io.mem_finish, mshrs.map(_.io.mem_finish)) io.store_pending := sdq_val =/= 0.U || mmios.map(_.io.store_pending).orR io.resp <> resp_arb.io.out io.req.ready := Mux(!cacheable, mmio_rdy, sdq_rdy && Mux(idx_match, tag_match && sec_rdy, pri_rdy)) io.secondary_miss := idx_match io.refill := refillMux(io.mem_grant.bits.source) val free_sdq = io.replay.fire && isWrite(io.replay.bits.cmd) io.replay.bits.data := sdq(RegEnable(replay_arb.io.out.bits.sdq_id, free_sdq)) io.replay.bits.mask := 0.U io.replay.valid := replay_arb.io.out.valid replay_arb.io.out.ready := io.replay.ready io.replay.bits.viewAsSupertype(new HellaCacheReqInternal) <> replay_arb.io.out.bits.viewAsSupertype(new HellaCacheReqInternal) when (io.replay.valid || sdq_enq) { sdq_val := sdq_val & ~(UIntToOH(replay_arb.io.out.bits.sdq_id) & Fill(cfg.nSDQ, free_sdq)) | PriorityEncoderOH(~sdq_val(cfg.nSDQ-1,0)) & Fill(cfg.nSDQ, sdq_enq) } } class WritebackUnit(implicit edge: TLEdgeOut, p: Parameters) extends L1HellaCacheModule()(p) { val io = IO(new Bundle { val req = Flipped(Decoupled(new WritebackReq(edge.bundle))) val meta_read = Decoupled(new L1MetaReadReq) val data_req = Decoupled(new L1DataReadReq) val data_resp = Input(Bits(encRowBits.W)) val release = Decoupled(new TLBundleC(edge.bundle)) }) val req = Reg(new WritebackReq(edge.bundle)) val active = RegInit(false.B) val r1_data_req_fired = RegInit(false.B) val r2_data_req_fired = RegInit(false.B) val data_req_cnt = RegInit(0.U(log2Up(refillCycles+1).W)) //TODO Zero width val (_, last_beat, all_beats_done, beat_count) = edge.count(io.release) io.release.valid := false.B when (active) { r1_data_req_fired := false.B r2_data_req_fired := r1_data_req_fired when (io.data_req.fire && io.meta_read.fire) { r1_data_req_fired := true.B data_req_cnt := data_req_cnt + 1.U } 
when (r2_data_req_fired) { io.release.valid := true.B when(!io.release.ready) { r1_data_req_fired := false.B r2_data_req_fired := false.B data_req_cnt := data_req_cnt - Mux[UInt]((refillCycles > 1).B && r1_data_req_fired, 2.U, 1.U) } when(!r1_data_req_fired) { // We're done if this is the final data request and the Release can be sent active := data_req_cnt < refillCycles.U || !io.release.ready } } } when (io.req.fire) { active := true.B data_req_cnt := 0.U req := io.req.bits } io.req.ready := !active val fire = active && data_req_cnt < refillCycles.U // We reissue the meta read as it sets up the mux ctrl for s2_data_muxed io.meta_read.valid := fire io.meta_read.bits.idx := req.idx io.meta_read.bits.tag := req.tag io.meta_read.bits.way_en := ~(0.U(nWays.W)) io.data_req.valid := fire io.data_req.bits.way_en := req.way_en io.data_req.bits.addr := (if(refillCycles > 1) Cat(req.idx, data_req_cnt(log2Up(refillCycles)-1,0)) else req.idx) << rowOffBits val r_address = Cat(req.tag, req.idx) << blockOffBits val probeResponse = edge.ProbeAck( fromSource = req.source, toAddress = r_address, lgSize = lgCacheBlockBytes.U, reportPermissions = req.param, data = io.data_resp) val voluntaryRelease = edge.Release( fromSource = req.source, toAddress = r_address, lgSize = lgCacheBlockBytes.U, shrinkPermissions = req.param, data = io.data_resp)._2 io.release.bits := Mux(req.voluntary, voluntaryRelease, probeResponse) } class ProbeUnit(implicit edge: TLEdgeOut, p: Parameters) extends L1HellaCacheModule()(p) { val io = IO(new Bundle { val req = Flipped(Decoupled(new TLBundleB(edge.bundle))) val rep = Decoupled(new TLBundleC(edge.bundle)) val meta_read = Decoupled(new L1MetaReadReq) val meta_write = Decoupled(new L1MetaWriteReq) val wb_req = Decoupled(new WritebackReq(edge.bundle)) val way_en = Input(Bits(nWays.W)) val mshr_rdy = Input(Bool()) val block_state = Input(new ClientMetadata()) }) val (s_invalid :: s_meta_read :: s_meta_resp :: s_mshr_req :: s_mshr_resp :: s_release :: s_writeback_req :: s_writeback_resp :: s_meta_write :: Nil) = Enum(9) val state = RegInit(s_invalid) val req = Reg(new TLBundleB(edge.bundle)) val req_idx = req.address(idxMSB, idxLSB) val req_tag = req.address >> untagBits val way_en = Reg(Bits()) val tag_matches = way_en.orR val old_coh = Reg(new ClientMetadata) val miss_coh = ClientMetadata.onReset val reply_coh = Mux(tag_matches, old_coh, miss_coh) val (is_dirty, report_param, new_coh) = reply_coh.onProbe(req.param) io.req.ready := state === s_invalid io.rep.valid := state === s_release io.rep.bits := edge.ProbeAck(req, report_param) assert(!io.rep.valid || !edge.hasData(io.rep.bits), "ProbeUnit should not send ProbeAcks with data, WritebackUnit should handle it") io.meta_read.valid := state === s_meta_read io.meta_read.bits.idx := req_idx io.meta_read.bits.tag := req_tag io.meta_read.bits.way_en := ~(0.U(nWays.W)) io.meta_write.valid := state === s_meta_write io.meta_write.bits.way_en := way_en io.meta_write.bits.idx := req_idx io.meta_write.bits.tag := req_tag io.meta_write.bits.data.tag := req_tag io.meta_write.bits.data.coh := new_coh io.wb_req.valid := state === s_writeback_req io.wb_req.bits.source := req.source io.wb_req.bits.idx := req_idx io.wb_req.bits.tag := req_tag io.wb_req.bits.param := report_param io.wb_req.bits.way_en := way_en io.wb_req.bits.voluntary := false.B // state === s_invalid when (io.req.fire) { state := s_meta_read req := io.req.bits } // state === s_meta_read when (io.meta_read.fire) { state := s_meta_resp } // we need to wait one cycle for the metadata 
to be read from the array when (state === s_meta_resp) { state := s_mshr_req } when (state === s_mshr_req) { old_coh := io.block_state way_en := io.way_en // if the read didn't go through, we need to retry state := Mux(io.mshr_rdy, s_mshr_resp, s_meta_read) } when (state === s_mshr_resp) { state := Mux(tag_matches && is_dirty, s_writeback_req, s_release) } when (state === s_release && io.rep.ready) { state := Mux(tag_matches, s_meta_write, s_invalid) } // state === s_writeback_req when (io.wb_req.fire) { state := s_writeback_resp } // wait for the writeback request to finish before updating the metadata when (state === s_writeback_resp && io.wb_req.ready) { state := s_meta_write } when (io.meta_write.fire) { state := s_invalid } } class DataArray(implicit p: Parameters) extends L1HellaCacheModule()(p) { val io = IO(new Bundle { val read = Flipped(Decoupled(new L1DataReadReq)) val write = Flipped(Decoupled(new L1DataWriteReq)) val resp = Output(Vec(nWays, Bits(encRowBits.W))) }) val waddr = io.write.bits.addr >> rowOffBits val raddr = io.read.bits.addr >> rowOffBits if (doNarrowRead) { for (w <- 0 until nWays by rowWords) { val wway_en = io.write.bits.way_en(w+rowWords-1,w) val rway_en = io.read.bits.way_en(w+rowWords-1,w) val resp = Wire(Vec(rowWords, Bits(encRowBits.W))) val r_raddr = RegEnable(io.read.bits.addr, io.read.valid) for (i <- 0 until resp.size) { val array = DescribedSRAM( name = s"array_${w}_${i}", desc = "Non-blocking DCache Data Array", size = nSets * refillCycles, data = Vec(rowWords, Bits(encDataBits.W)) ) when (wway_en.orR && io.write.valid && io.write.bits.wmask(i)) { val data = VecInit.fill(rowWords)(io.write.bits.data(encDataBits*(i+1)-1,encDataBits*i)) array.write(waddr, data, wway_en.asBools) } resp(i) := array.read(raddr, rway_en.orR && io.read.valid).asUInt } for (dw <- 0 until rowWords) { val r = VecInit(resp.map(_(encDataBits*(dw+1)-1,encDataBits*dw))) val resp_mux = if (r.size == 1) r else VecInit(r(r_raddr(rowOffBits-1,wordOffBits)), r.tail:_*) io.resp(w+dw) := resp_mux.asUInt } } } else { for (w <- 0 until nWays) { val array = DescribedSRAM( name = s"array_${w}", desc = "Non-blocking DCache Data Array", size = nSets * refillCycles, data = Vec(rowWords, Bits(encDataBits.W)) ) when (io.write.bits.way_en(w) && io.write.valid) { val data = VecInit.tabulate(rowWords)(i => io.write.bits.data(encDataBits*(i+1)-1,encDataBits*i)) array.write(waddr, data, io.write.bits.wmask.asBools) } io.resp(w) := array.read(raddr, io.read.bits.way_en(w) && io.read.valid).asUInt } } io.read.ready := true.B io.write.ready := true.B } class NonBlockingDCache(staticIdForMetadataUseOnly: Int)(implicit p: Parameters) extends HellaCache(staticIdForMetadataUseOnly)(p) { override lazy val module = new NonBlockingDCacheModule(this) } class NonBlockingDCacheModule(outer: NonBlockingDCache) extends HellaCacheModule(outer) { require(isPow2(nWays)) // TODO: relax this require(dataScratchpadSize == 0) require(!usingVM || untagBits <= pgIdxBits, s"untagBits($untagBits) > pgIdxBits($pgIdxBits)") require(!cacheParams.separateUncachedResp) // ECC is only supported on the data array require(cacheParams.tagCode.isInstanceOf[IdentityCode]) val dECC = cacheParams.dataCode io.cpu := DontCare io.errors := DontCare val wb = Module(new WritebackUnit) val prober = Module(new ProbeUnit) val mshrs = Module(new MSHRFile) io.tlb_port.req.ready := true.B io.cpu.req.ready := true.B val s1_valid = RegNext(io.cpu.req.fire, false.B) val s1_tlb_req_valid = RegNext(io.tlb_port.req.fire, false.B) val s1_tlb_req = 
RegEnable(io.tlb_port.req.bits, io.tlb_port.req.fire) val s1_req = Reg(new HellaCacheReq) val s1_valid_masked = s1_valid && !io.cpu.s1_kill val s1_replay = RegInit(false.B) val s1_clk_en = Reg(Bool()) val s1_sfence = s1_req.cmd === M_SFENCE val s2_valid = RegNext(s1_valid_masked && !s1_sfence, false.B) && !io.cpu.s2_xcpt.asUInt.orR val s2_tlb_req_valid = RegNext(s1_tlb_req_valid, false.B) val s2_req = Reg(new HellaCacheReq) val s2_replay = RegNext(s1_replay, false.B) && s2_req.cmd =/= M_FLUSH_ALL val s2_recycle = Wire(Bool()) val s2_valid_masked = Wire(Bool()) val s3_valid = RegInit(false.B) val s3_req = Reg(new HellaCacheReq) val s3_way = Reg(Bits()) val s1_recycled = RegEnable(s2_recycle, false.B, s1_clk_en) val s1_read = isRead(s1_req.cmd) val s1_write = isWrite(s1_req.cmd) val s1_readwrite = s1_read || s1_write || isPrefetch(s1_req.cmd) // check for unsupported operations assert(!s1_valid || !s1_req.cmd.isOneOf(M_PWR)) val dtlb = Module(new TLB(false, log2Ceil(coreDataBytes), TLBConfig(nTLBSets, nTLBWays))) io.ptw <> dtlb.io.ptw dtlb.io.kill := io.cpu.s2_kill dtlb.io.req.valid := s1_valid && !io.cpu.s1_kill && s1_readwrite dtlb.io.req.bits.passthrough := s1_req.phys dtlb.io.req.bits.vaddr := s1_req.addr dtlb.io.req.bits.size := s1_req.size dtlb.io.req.bits.cmd := s1_req.cmd dtlb.io.req.bits.prv := s1_req.dprv dtlb.io.req.bits.v := s1_req.dv when (s1_tlb_req_valid) { dtlb.io.req.bits := s1_tlb_req } when (!dtlb.io.req.ready && !io.cpu.req.bits.phys) { io.cpu.req.ready := false.B } dtlb.io.sfence.valid := s1_valid && !io.cpu.s1_kill && s1_sfence dtlb.io.sfence.bits.rs1 := s1_req.size(0) dtlb.io.sfence.bits.rs2 := s1_req.size(1) dtlb.io.sfence.bits.addr := s1_req.addr dtlb.io.sfence.bits.asid := io.cpu.s1_data.data dtlb.io.sfence.bits.hv := s1_req.cmd === M_HFENCEV dtlb.io.sfence.bits.hg := s1_req.cmd === M_HFENCEG when (io.cpu.req.valid) { s1_req := io.cpu.req.bits } when (wb.io.meta_read.valid) { s1_req.addr := Cat(wb.io.meta_read.bits.tag, wb.io.meta_read.bits.idx) << blockOffBits s1_req.phys := true.B } when (prober.io.meta_read.valid) { s1_req.addr := Cat(prober.io.meta_read.bits.tag, prober.io.meta_read.bits.idx) << blockOffBits s1_req.phys := true.B } when (mshrs.io.replay.valid) { s1_req := mshrs.io.replay.bits } when (s2_recycle) { s1_req := s2_req } val s1_addr = Mux(s1_req.phys, s1_req.addr, dtlb.io.resp.paddr) io.tlb_port.s1_resp := dtlb.io.resp when (s1_clk_en) { s2_req.size := s1_req.size s2_req.signed := s1_req.signed s2_req.phys := s1_req.phys s2_req.addr := s1_addr s2_req.no_resp := s1_req.no_resp when (s1_write) { s2_req.data := Mux(s1_replay, mshrs.io.replay.bits.data, io.cpu.s1_data.data) } when (s1_recycled) { s2_req.data := s1_req.data } s2_req.tag := s1_req.tag s2_req.cmd := s1_req.cmd } // tags def onReset = L1Metadata(0.U, ClientMetadata.onReset) val meta = Module(new L1MetadataArray(() => onReset )) val metaReadArb = Module(new Arbiter(new L1MetaReadReq, 5)) val metaWriteArb = Module(new Arbiter(new L1MetaWriteReq, 2)) meta.io.read <> metaReadArb.io.out meta.io.write <> metaWriteArb.io.out // data val data = Module(new DataArray) val readArb = Module(new Arbiter(new L1DataReadReq, 4)) val writeArb = Module(new Arbiter(new L1DataWriteReq, 2)) data.io.write.valid := writeArb.io.out.valid writeArb.io.out.ready := data.io.write.ready data.io.write.bits := writeArb.io.out.bits val wdata_encoded = (0 until rowWords).map(i => dECC.encode(writeArb.io.out.bits.data(coreDataBits*(i+1)-1,coreDataBits*i))) data.io.write.bits.data := wdata_encoded.asUInt // tag read for new 
// requests
  metaReadArb.io.in(4).valid := io.cpu.req.valid
  metaReadArb.io.in(4).bits.idx := io.cpu.req.bits.addr >> blockOffBits
  metaReadArb.io.in(4).bits.tag := io.cpu.req.bits.addr >> untagBits
  metaReadArb.io.in(4).bits.way_en := ~0.U(nWays.W)
  when (!metaReadArb.io.in(4).ready) { io.cpu.req.ready := false.B }

  // data read for new requests
  readArb.io.in(3).valid := io.cpu.req.valid
  readArb.io.in(3).bits.addr := io.cpu.req.bits.addr
  readArb.io.in(3).bits.way_en := ~0.U(nWays.W)
  when (!readArb.io.in(3).ready) { io.cpu.req.ready := false.B }

  // recycled requests
  metaReadArb.io.in(0).valid := s2_recycle
  metaReadArb.io.in(0).bits.idx := s2_req.addr >> blockOffBits
  metaReadArb.io.in(0).bits.way_en := ~0.U(nWays.W)
  metaReadArb.io.in(0).bits.tag := s2_req.tag
  readArb.io.in(0).valid := s2_recycle
  readArb.io.in(0).bits.addr := s2_req.addr
  readArb.io.in(0).bits.way_en := ~0.U(nWays.W)

  // tag check and way muxing
  def wayMap[T <: Data](f: Int => T) = VecInit((0 until nWays).map(f))
  val s1_tag_eq_way = wayMap((w: Int) => meta.io.resp(w).tag === (s1_addr >> untagBits)).asUInt
  val s1_tag_match_way = wayMap((w: Int) => s1_tag_eq_way(w) && meta.io.resp(w).coh.isValid()).asUInt
  s1_clk_en := metaReadArb.io.out.valid //TODO: should be metaReadArb.io.out.fire, but triggers Verilog backend bug
  val s1_writeback = s1_clk_en && !s1_valid && !s1_replay
  val s2_tag_match_way = RegEnable(s1_tag_match_way, s1_clk_en)
  val s2_tag_match = s2_tag_match_way.orR
  val s2_hit_state = Mux1H(s2_tag_match_way, wayMap((w: Int) => RegEnable(meta.io.resp(w).coh, s1_clk_en)))
  val (s2_has_permission, _, s2_new_hit_state) = s2_hit_state.onAccess(s2_req.cmd)
  val s2_hit = s2_tag_match && s2_has_permission && s2_hit_state === s2_new_hit_state

  // load-reserved/store-conditional
  val lrsc_count = RegInit(0.U)
  val lrsc_valid = lrsc_count > lrscBackoff.U
  val lrsc_addr = Reg(UInt())
  val (s2_lr, s2_sc) = (s2_req.cmd === M_XLR, s2_req.cmd === M_XSC)
  val s2_lrsc_addr_match = lrsc_valid && lrsc_addr === (s2_req.addr >> blockOffBits)
  val s2_sc_fail = s2_sc && !s2_lrsc_addr_match
  when (lrsc_count > 0.U) { lrsc_count := lrsc_count - 1.U }
  when (s2_valid_masked && s2_hit || s2_replay) {
    when (s2_lr) {
      lrsc_count := lrscCycles.U - 1.U
      lrsc_addr := s2_req.addr >> blockOffBits
    }
    when (lrsc_count > 0.U) {
      lrsc_count := 0.U
    }
  }
  when (s2_valid_masked && !(s2_tag_match && s2_has_permission) && s2_lrsc_addr_match) {
    lrsc_count := 0.U
  }

  val s2_data = Wire(Vec(nWays, Bits(encRowBits.W)))
  for (w <- 0 until nWays) {
    val regs = Reg(Vec(rowWords, Bits(encDataBits.W)))
    val en1 = s1_clk_en && s1_tag_eq_way(w)
    for (i <- 0 until regs.size) {
      val en = en1 && (((i == 0).B || !doNarrowRead.B) || s1_writeback)
      when (en) { regs(i) := data.io.resp(w) >> encDataBits*i }
    }
    s2_data(w) := regs.asUInt
  }
  val s2_data_muxed = Mux1H(s2_tag_match_way, s2_data)
  val s2_data_decoded = (0 until rowWords).map(i => dECC.decode(s2_data_muxed(encDataBits*(i+1)-1,encDataBits*i)))
  val s2_data_corrected = s2_data_decoded.map(_.corrected).asUInt
  val s2_data_uncorrected = s2_data_decoded.map(_.uncorrected).asUInt
  val s2_word_idx = if(doNarrowRead) 0.U else s2_req.addr(log2Up(rowWords*coreDataBytes)-1,log2Up(wordBytes))
  val s2_data_correctable = s2_data_decoded.map(_.correctable).asUInt(s2_word_idx)

  // store/amo hits
  s3_valid := (s2_valid_masked && s2_hit || s2_replay) && !s2_sc_fail && isWrite(s2_req.cmd)
  val amoalu = Module(new AMOALU(xLen))
  when ((s2_valid || s2_replay) && (isWrite(s2_req.cmd) || s2_data_correctable)) {
    s3_req := s2_req
    s3_req.data := Mux(s2_data_correctable, s2_data_corrected,
amoalu.io.out) s3_way := s2_tag_match_way } writeArb.io.in(0).bits.addr := s3_req.addr writeArb.io.in(0).bits.wmask := UIntToOH(s3_req.addr.extract(rowOffBits-1,offsetlsb)) writeArb.io.in(0).bits.data := Fill(rowWords, s3_req.data) writeArb.io.in(0).valid := s3_valid writeArb.io.in(0).bits.way_en := s3_way // replacement policy val replacer = cacheParams.replacement val s1_replaced_way_en = UIntToOH(replacer.way) val s2_replaced_way_en = UIntToOH(RegEnable(replacer.way, s1_clk_en)) val s2_repl_meta = Mux1H(s2_replaced_way_en, wayMap((w: Int) => RegEnable(meta.io.resp(w), s1_clk_en && s1_replaced_way_en(w))).toSeq) // miss handling mshrs.io.req.valid := s2_valid_masked && !s2_hit && (isPrefetch(s2_req.cmd) || isRead(s2_req.cmd) || isWrite(s2_req.cmd)) mshrs.io.req.bits.viewAsSupertype(new Replay) := s2_req.viewAsSupertype(new HellaCacheReq) mshrs.io.req.bits.tag_match := s2_tag_match mshrs.io.req.bits.old_meta := Mux(s2_tag_match, L1Metadata(s2_repl_meta.tag, s2_hit_state), s2_repl_meta) mshrs.io.req.bits.way_en := Mux(s2_tag_match, s2_tag_match_way, s2_replaced_way_en) mshrs.io.req.bits.data := s2_req.data when (mshrs.io.req.fire) { replacer.miss } tl_out.a <> mshrs.io.mem_acquire // replays readArb.io.in(1).valid := mshrs.io.replay.valid readArb.io.in(1).bits.addr := mshrs.io.replay.bits.addr readArb.io.in(1).bits.way_en := ~0.U(nWays.W) mshrs.io.replay.ready := readArb.io.in(1).ready s1_replay := mshrs.io.replay.valid && readArb.io.in(1).ready metaReadArb.io.in(1) <> mshrs.io.meta_read metaWriteArb.io.in(0) <> mshrs.io.meta_write // probes and releases prober.io.req.valid := tl_out.b.valid && !lrsc_valid tl_out.b.ready := prober.io.req.ready && !lrsc_valid prober.io.req.bits := tl_out.b.bits prober.io.way_en := s2_tag_match_way prober.io.block_state := s2_hit_state metaReadArb.io.in(2) <> prober.io.meta_read metaWriteArb.io.in(1) <> prober.io.meta_write prober.io.mshr_rdy := mshrs.io.probe_rdy // refills val grant_has_data = edge.hasData(tl_out.d.bits) mshrs.io.mem_grant.valid := tl_out.d.fire mshrs.io.mem_grant.bits := tl_out.d.bits tl_out.d.ready := writeArb.io.in(1).ready || !grant_has_data /* The last clause here is necessary in order to prevent the responses for * the IOMSHRs from being written into the data array. It works because the * IOMSHR ids start right the ones for the regular MSHRs. 
*/ writeArb.io.in(1).valid := tl_out.d.valid && grant_has_data && tl_out.d.bits.source < cfg.nMSHRs.U writeArb.io.in(1).bits.addr := mshrs.io.refill.addr writeArb.io.in(1).bits.way_en := mshrs.io.refill.way_en writeArb.io.in(1).bits.wmask := ~0.U(rowWords.W) writeArb.io.in(1).bits.data := tl_out.d.bits.data(encRowBits-1,0) data.io.read <> readArb.io.out readArb.io.out.ready := !tl_out.d.valid || tl_out.d.ready // insert bubble if refill gets blocked tl_out.e <> mshrs.io.mem_finish // writebacks val wbArb = Module(new Arbiter(new WritebackReq(edge.bundle), 2)) wbArb.io.in(0) <> prober.io.wb_req wbArb.io.in(1) <> mshrs.io.wb_req wb.io.req <> wbArb.io.out metaReadArb.io.in(3) <> wb.io.meta_read readArb.io.in(2) <> wb.io.data_req wb.io.data_resp := s2_data_corrected TLArbiter.lowest(edge, tl_out.c, wb.io.release, prober.io.rep) // store->load bypassing val s4_valid = RegNext(s3_valid, false.B) val s4_req = RegEnable(s3_req, s3_valid && metaReadArb.io.out.valid) val bypasses = List( ((s2_valid_masked || s2_replay) && !s2_sc_fail, s2_req, amoalu.io.out), (s3_valid, s3_req, s3_req.data), (s4_valid, s4_req, s4_req.data) ).map(r => (r._1 && (s1_addr >> wordOffBits === r._2.addr >> wordOffBits) && isWrite(r._2.cmd), r._3)) val s2_store_bypass_data = Reg(Bits(coreDataBits.W)) val s2_store_bypass = Reg(Bool()) when (s1_clk_en) { s2_store_bypass := false.B when (bypasses.map(_._1).reduce(_||_)) { s2_store_bypass_data := PriorityMux(bypasses) s2_store_bypass := true.B } } // load data subword mux/sign extension val s2_data_word_prebypass = s2_data_uncorrected >> Cat(s2_word_idx, 0.U(log2Up(coreDataBits).W)) val s2_data_word = Mux(s2_store_bypass, s2_store_bypass_data, s2_data_word_prebypass) val loadgen = new LoadGen(s2_req.size, s2_req.signed, s2_req.addr, s2_data_word, s2_sc, wordBytes) amoalu.io.mask := new StoreGen(s2_req.size, s2_req.addr, 0.U, xLen/8).mask amoalu.io.cmd := s2_req.cmd amoalu.io.lhs := s2_data_word amoalu.io.rhs := s2_req.data // nack it like it's hot val s1_nack = dtlb.io.req.valid && dtlb.io.resp.miss || io.cpu.s2_nack || s1_tlb_req_valid || s1_req.addr(idxMSB,idxLSB) === prober.io.meta_write.bits.idx && !prober.io.req.ready val s2_nack_hit = RegEnable(s1_nack, s1_valid || s1_replay) when (s2_nack_hit) { mshrs.io.req.valid := false.B } val s2_nack_victim = s2_hit && mshrs.io.secondary_miss val s2_nack_miss = !s2_hit && !mshrs.io.req.ready val s2_nack = s2_nack_hit || s2_nack_victim || s2_nack_miss s2_valid_masked := s2_valid && !s2_nack && !io.cpu.s2_kill val s2_recycle_ecc = (s2_valid || s2_replay) && s2_hit && s2_data_correctable val s2_recycle_next = RegInit(false.B) when (s1_valid || s1_replay) { s2_recycle_next := s2_recycle_ecc } s2_recycle := s2_recycle_ecc || s2_recycle_next // after a nack, block until nack condition resolves to save energy val block_miss = RegInit(false.B) block_miss := (s2_valid || block_miss) && s2_nack_miss when (block_miss || s1_nack) { io.cpu.req.ready := false.B } val cache_resp = Wire(Valid(new HellaCacheResp)) cache_resp.valid := (s2_replay || s2_valid_masked && s2_hit) && !s2_data_correctable cache_resp.bits.addr := s2_req.addr cache_resp.bits.idx.foreach(_ := s2_req.idx.get) cache_resp.bits.tag := s2_req.tag cache_resp.bits.cmd := s2_req.cmd cache_resp.bits.size := s2_req.size cache_resp.bits.signed := s2_req.signed cache_resp.bits.dprv := s2_req.dprv cache_resp.bits.dv := s2_req.dv cache_resp.bits.data_word_bypass := loadgen.wordData cache_resp.bits.data_raw := s2_data_word cache_resp.bits.mask := s2_req.mask cache_resp.bits.has_data := 
isRead(s2_req.cmd) cache_resp.bits.data := loadgen.data | s2_sc_fail cache_resp.bits.store_data := s2_req.data cache_resp.bits.replay := s2_replay val uncache_resp = Wire(Valid(new HellaCacheResp)) uncache_resp.bits := mshrs.io.resp.bits uncache_resp.valid := mshrs.io.resp.valid mshrs.io.resp.ready := RegNext(!(s1_valid || s1_replay)) io.cpu.s2_nack := s2_valid && s2_nack io.cpu.resp := Mux(mshrs.io.resp.ready, uncache_resp, cache_resp) io.cpu.resp.bits.data_word_bypass := loadgen.wordData io.cpu.resp.bits.data_raw := s2_data_word io.cpu.ordered := mshrs.io.fence_rdy && !s1_valid && !s2_valid io.cpu.store_pending := mshrs.io.store_pending io.cpu.replay_next := (s1_replay && s1_read) || mshrs.io.replay_next val s1_xcpt_valid = dtlb.io.req.valid && !s1_nack val s1_xcpt = dtlb.io.resp io.cpu.s2_xcpt := Mux(RegNext(s1_xcpt_valid), RegEnable(s1_xcpt, s1_clk_en), 0.U.asTypeOf(s1_xcpt)) io.cpu.s2_uncached := false.B io.cpu.s2_paddr := s2_req.addr // performance events io.cpu.perf.acquire := edge.done(tl_out.a) io.cpu.perf.release := edge.done(tl_out.c) io.cpu.perf.tlbMiss := io.ptw.req.fire // no clock-gating support io.cpu.clock_enabled := true.B } File Consts.scala: // See LICENSE.Berkeley for license details. package freechips.rocketchip.rocket.constants import chisel3._ import chisel3.util._ import freechips.rocketchip.util._ trait ScalarOpConstants { val SZ_BR = 3 def BR_X = BitPat("b???") def BR_EQ = 0.U(3.W) def BR_NE = 1.U(3.W) def BR_J = 2.U(3.W) def BR_N = 3.U(3.W) def BR_LT = 4.U(3.W) def BR_GE = 5.U(3.W) def BR_LTU = 6.U(3.W) def BR_GEU = 7.U(3.W) def A1_X = BitPat("b??") def A1_ZERO = 0.U(2.W) def A1_RS1 = 1.U(2.W) def A1_PC = 2.U(2.W) def A1_RS1SHL = 3.U(2.W) def IMM_X = BitPat("b???") def IMM_S = 0.U(3.W) def IMM_SB = 1.U(3.W) def IMM_U = 2.U(3.W) def IMM_UJ = 3.U(3.W) def IMM_I = 4.U(3.W) def IMM_Z = 5.U(3.W) def A2_X = BitPat("b???") def A2_ZERO = 0.U(3.W) def A2_SIZE = 1.U(3.W) def A2_RS2 = 2.U(3.W) def A2_IMM = 3.U(3.W) def A2_RS2OH = 4.U(3.W) def A2_IMMOH = 5.U(3.W) def X = BitPat("b?") def N = BitPat("b0") def Y = BitPat("b1") val SZ_DW = 1 def DW_X = X def DW_32 = false.B def DW_64 = true.B def DW_XPR = DW_64 } trait MemoryOpConstants { val NUM_XA_OPS = 9 val M_SZ = 5 def M_X = BitPat("b?????"); def M_XRD = "b00000".U; // int load def M_XWR = "b00001".U; // int store def M_PFR = "b00010".U; // prefetch with intent to read def M_PFW = "b00011".U; // prefetch with intent to write def M_XA_SWAP = "b00100".U def M_FLUSH_ALL = "b00101".U // flush all lines def M_XLR = "b00110".U def M_XSC = "b00111".U def M_XA_ADD = "b01000".U def M_XA_XOR = "b01001".U def M_XA_OR = "b01010".U def M_XA_AND = "b01011".U def M_XA_MIN = "b01100".U def M_XA_MAX = "b01101".U def M_XA_MINU = "b01110".U def M_XA_MAXU = "b01111".U def M_FLUSH = "b10000".U // write back dirty data and cede R/W permissions def M_PWR = "b10001".U // partial (masked) store def M_PRODUCE = "b10010".U // write back dirty data and cede W permissions def M_CLEAN = "b10011".U // write back dirty data and retain R/W permissions def M_SFENCE = "b10100".U // SFENCE.VMA def M_HFENCEV = "b10101".U // HFENCE.VVMA def M_HFENCEG = "b10110".U // HFENCE.GVMA def M_WOK = "b10111".U // check write permissions but don't perform a write def M_HLVX = "b10000".U // HLVX instruction def isAMOLogical(cmd: UInt) = cmd.isOneOf(M_XA_SWAP, M_XA_XOR, M_XA_OR, M_XA_AND) def isAMOArithmetic(cmd: UInt) = cmd.isOneOf(M_XA_ADD, M_XA_MIN, M_XA_MAX, M_XA_MINU, M_XA_MAXU) def isAMO(cmd: UInt) = isAMOLogical(cmd) || isAMOArithmetic(cmd) def isPrefetch(cmd: 
UInt) = cmd === M_PFR || cmd === M_PFW def isRead(cmd: UInt) = cmd.isOneOf(M_XRD, M_HLVX, M_XLR, M_XSC) || isAMO(cmd) def isWrite(cmd: UInt) = cmd === M_XWR || cmd === M_PWR || cmd === M_XSC || isAMO(cmd) def isWriteIntent(cmd: UInt) = isWrite(cmd) || cmd === M_PFW || cmd === M_XLR } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.nodes._ import freechips.rocketchip.diplomacy.{ AddressDecoder, AddressSet, BufferParams, DirectedBuffers, IdMap, IdMapEntry, IdRange, RegionType, TransferSizes } import freechips.rocketchip.resources.{Resource, ResourceAddress, ResourcePermissions} import freechips.rocketchip.util.{ AsyncQueueParams, BundleField, BundleFieldBase, BundleKeyBase, CreditedDelay, groupByIntoSeq, RationalDirection, SimpleProduct } import scala.math.max //These transfer sizes describe requests issued from masters on the A channel that will be responded by slaves on the D channel case class TLMasterToSlaveTransferSizes( // Supports both Acquire+Release of the following two sizes: acquireT: TransferSizes = TransferSizes.none, acquireB: TransferSizes = TransferSizes.none, arithmetic: TransferSizes = TransferSizes.none, logical: TransferSizes = TransferSizes.none, get: TransferSizes = TransferSizes.none, putFull: TransferSizes = TransferSizes.none, putPartial: TransferSizes = TransferSizes.none, hint: TransferSizes = TransferSizes.none) extends TLCommonTransferSizes { def intersect(rhs: TLMasterToSlaveTransferSizes) = TLMasterToSlaveTransferSizes( acquireT = acquireT .intersect(rhs.acquireT), acquireB = acquireB .intersect(rhs.acquireB), arithmetic = arithmetic.intersect(rhs.arithmetic), logical = logical .intersect(rhs.logical), get = get .intersect(rhs.get), putFull = putFull .intersect(rhs.putFull), putPartial = putPartial.intersect(rhs.putPartial), hint = hint .intersect(rhs.hint)) def mincover(rhs: TLMasterToSlaveTransferSizes) = TLMasterToSlaveTransferSizes( acquireT = acquireT .mincover(rhs.acquireT), acquireB = acquireB .mincover(rhs.acquireB), arithmetic = arithmetic.mincover(rhs.arithmetic), logical = logical .mincover(rhs.logical), get = get .mincover(rhs.get), putFull = putFull .mincover(rhs.putFull), putPartial = putPartial.mincover(rhs.putPartial), hint = hint .mincover(rhs.hint)) // Reduce rendering to a simple yes/no per field override def toString = { def str(x: TransferSizes, flag: String) = if (x.none) "" else flag def flags = Vector( str(acquireT, "T"), str(acquireB, "B"), str(arithmetic, "A"), str(logical, "L"), str(get, "G"), str(putFull, "F"), str(putPartial, "P"), str(hint, "H")) flags.mkString } // Prints out the actual information in a user readable way def infoString = { s"""acquireT = ${acquireT} |acquireB = ${acquireB} |arithmetic = ${arithmetic} |logical = ${logical} |get = ${get} |putFull = ${putFull} |putPartial = ${putPartial} |hint = ${hint} | |""".stripMargin } } object TLMasterToSlaveTransferSizes { def unknownEmits = TLMasterToSlaveTransferSizes( acquireT = TransferSizes(1, 4096), acquireB = TransferSizes(1, 4096), arithmetic = TransferSizes(1, 4096), logical = TransferSizes(1, 4096), get = TransferSizes(1, 4096), putFull = TransferSizes(1, 4096), putPartial = TransferSizes(1, 4096), hint = TransferSizes(1, 4096)) def unknownSupports = TLMasterToSlaveTransferSizes() } //These transfer sizes describe requests issued from slaves on the B channel that 
// will be responded by masters on the C channel
case class TLSlaveToMasterTransferSizes(
  probe:      TransferSizes = TransferSizes.none,
  arithmetic: TransferSizes = TransferSizes.none,
  logical:    TransferSizes = TransferSizes.none,
  get:        TransferSizes = TransferSizes.none,
  putFull:    TransferSizes = TransferSizes.none,
  putPartial: TransferSizes = TransferSizes.none,
  hint:       TransferSizes = TransferSizes.none
) extends TLCommonTransferSizes {
  def intersect(rhs: TLSlaveToMasterTransferSizes) = TLSlaveToMasterTransferSizes(
    probe      = probe     .intersect(rhs.probe),
    arithmetic = arithmetic.intersect(rhs.arithmetic),
    logical    = logical   .intersect(rhs.logical),
    get        = get       .intersect(rhs.get),
    putFull    = putFull   .intersect(rhs.putFull),
    putPartial = putPartial.intersect(rhs.putPartial),
    hint       = hint      .intersect(rhs.hint)
  )
  def mincover(rhs: TLSlaveToMasterTransferSizes) = TLSlaveToMasterTransferSizes(
    probe      = probe     .mincover(rhs.probe),
    arithmetic = arithmetic.mincover(rhs.arithmetic),
    logical    = logical   .mincover(rhs.logical),
    get        = get       .mincover(rhs.get),
    putFull    = putFull   .mincover(rhs.putFull),
    putPartial = putPartial.mincover(rhs.putPartial),
    hint       = hint      .mincover(rhs.hint)
  )
  // Reduce rendering to a simple yes/no per field
  override def toString = {
    def str(x: TransferSizes, flag: String) = if (x.none) "" else flag
    def flags = Vector(
      str(probe,      "P"),
      str(arithmetic, "A"),
      str(logical,    "L"),
      str(get,        "G"),
      str(putFull,    "F"),
      str(putPartial, "P"),
      str(hint,       "H"))
    flags.mkString
  }
  // Prints out the actual information in a user readable way
  def infoString = {
    s"""probe = ${probe}
       |arithmetic = ${arithmetic}
       |logical = ${logical}
       |get = ${get}
       |putFull = ${putFull}
       |putPartial = ${putPartial}
       |hint = ${hint}
       |
       |""".stripMargin
  }
}

object TLSlaveToMasterTransferSizes {
  def unknownEmits = TLSlaveToMasterTransferSizes(
    arithmetic = TransferSizes(1, 4096),
    logical    = TransferSizes(1, 4096),
    get        = TransferSizes(1, 4096),
    putFull    = TransferSizes(1, 4096),
    putPartial = TransferSizes(1, 4096),
    hint       = TransferSizes(1, 4096),
    probe      = TransferSizes(1, 4096))
  def unknownSupports = TLSlaveToMasterTransferSizes()
}

trait TLCommonTransferSizes {
  def arithmetic: TransferSizes
  def logical:    TransferSizes
  def get:        TransferSizes
  def putFull:    TransferSizes
  def putPartial: TransferSizes
  def hint:       TransferSizes
}

class TLSlaveParameters private(
  val nodePath:      Seq[BaseNode],
  val resources:     Seq[Resource],
  setName:           Option[String],
  val address:       Seq[AddressSet],
  val regionType:    RegionType.T,
  val executable:    Boolean,
  val fifoId:        Option[Int],
  val supports:      TLMasterToSlaveTransferSizes,
  val emits:         TLSlaveToMasterTransferSizes, // By default, slaves are forbidden from issuing 'denied' responses (it prevents Fragmentation)
  val alwaysGrantsT: Boolean, // typically only true for CacheCork'd read-write devices; dual: neverReleaseData
  // If fifoId=Some, all accesses sent to the same fifoId are executed and ACK'd in FIFO order
  // Note: you can only rely on this FIFO behaviour if your TLMasterParameters include requestFifo
  val mayDenyGet:    Boolean, // applies to: AccessAckData, GrantData
  val mayDenyPut:    Boolean) // applies to: AccessAck, Grant, HintAck
                              // ReleaseAck may NEVER be denied
  extends SimpleProduct
{
  def sortedAddress = address.sorted

  override def canEqual(that: Any): Boolean = that.isInstanceOf[TLSlaveParameters]
  override def productPrefix = "TLSlaveParameters"
  // We intentionally omit nodePath for equality testing / formatting
  def productArity: Int = 11
  def productElement(n: Int): Any = n match {
    case 0 => name
    case 1 => address
    case 2 => resources
case 3 => regionType case 4 => executable case 5 => fifoId case 6 => supports case 7 => emits case 8 => alwaysGrantsT case 9 => mayDenyGet case 10 => mayDenyPut case _ => throw new IndexOutOfBoundsException(n.toString) } def supportsAcquireT: TransferSizes = supports.acquireT def supportsAcquireB: TransferSizes = supports.acquireB def supportsArithmetic: TransferSizes = supports.arithmetic def supportsLogical: TransferSizes = supports.logical def supportsGet: TransferSizes = supports.get def supportsPutFull: TransferSizes = supports.putFull def supportsPutPartial: TransferSizes = supports.putPartial def supportsHint: TransferSizes = supports.hint require (!address.isEmpty, "Address cannot be empty") address.foreach { a => require (a.finite, "Address must be finite") } address.combinations(2).foreach { case Seq(x,y) => require (!x.overlaps(y), s"$x and $y overlap.") } require (supportsPutFull.contains(supportsPutPartial), s"PutFull($supportsPutFull) < PutPartial($supportsPutPartial)") require (supportsPutFull.contains(supportsArithmetic), s"PutFull($supportsPutFull) < Arithmetic($supportsArithmetic)") require (supportsPutFull.contains(supportsLogical), s"PutFull($supportsPutFull) < Logical($supportsLogical)") require (supportsGet.contains(supportsArithmetic), s"Get($supportsGet) < Arithmetic($supportsArithmetic)") require (supportsGet.contains(supportsLogical), s"Get($supportsGet) < Logical($supportsLogical)") require (supportsAcquireB.contains(supportsAcquireT), s"AcquireB($supportsAcquireB) < AcquireT($supportsAcquireT)") require (!alwaysGrantsT || supportsAcquireT, s"Must supportAcquireT if promising to always grantT") // Make sure that the regionType agrees with the capabilities require (!supportsAcquireB || regionType >= RegionType.UNCACHED) // acquire -> uncached, tracked, cached require (regionType <= RegionType.UNCACHED || supportsAcquireB) // tracked, cached -> acquire require (regionType != RegionType.UNCACHED || supportsGet) // uncached -> supportsGet val name = setName.orElse(nodePath.lastOption.map(_.lazyModule.name)).getOrElse("disconnected") val maxTransfer = List( // Largest supported transfer of all types supportsAcquireT.max, supportsAcquireB.max, supportsArithmetic.max, supportsLogical.max, supportsGet.max, supportsPutFull.max, supportsPutPartial.max).max val maxAddress = address.map(_.max).max val minAlignment = address.map(_.alignment).min // The device had better not support a transfer larger than its alignment require (minAlignment >= maxTransfer, s"Bad $address: minAlignment ($minAlignment) must be >= maxTransfer ($maxTransfer)") def toResource: ResourceAddress = { ResourceAddress(address, ResourcePermissions( r = supportsAcquireB || supportsGet, w = supportsAcquireT || supportsPutFull, x = executable, c = supportsAcquireB, a = supportsArithmetic && supportsLogical)) } def findTreeViolation() = nodePath.find { case _: MixedAdapterNode[_, _, _, _, _, _, _, _] => false case _: SinkNode[_, _, _, _, _] => false case node => node.inputs.size != 1 } def isTree = findTreeViolation() == None def infoString = { s"""Slave Name = ${name} |Slave Address = ${address} |supports = ${supports.infoString} | |""".stripMargin } def v1copy( address: Seq[AddressSet] = address, resources: Seq[Resource] = resources, regionType: RegionType.T = regionType, executable: Boolean = executable, nodePath: Seq[BaseNode] = nodePath, supportsAcquireT: TransferSizes = supports.acquireT, supportsAcquireB: TransferSizes = supports.acquireB, supportsArithmetic: TransferSizes = supports.arithmetic, 
supportsLogical: TransferSizes = supports.logical, supportsGet: TransferSizes = supports.get, supportsPutFull: TransferSizes = supports.putFull, supportsPutPartial: TransferSizes = supports.putPartial, supportsHint: TransferSizes = supports.hint, mayDenyGet: Boolean = mayDenyGet, mayDenyPut: Boolean = mayDenyPut, alwaysGrantsT: Boolean = alwaysGrantsT, fifoId: Option[Int] = fifoId) = { new TLSlaveParameters( setName = setName, address = address, resources = resources, regionType = regionType, executable = executable, nodePath = nodePath, supports = TLMasterToSlaveTransferSizes( acquireT = supportsAcquireT, acquireB = supportsAcquireB, arithmetic = supportsArithmetic, logical = supportsLogical, get = supportsGet, putFull = supportsPutFull, putPartial = supportsPutPartial, hint = supportsHint), emits = emits, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut, alwaysGrantsT = alwaysGrantsT, fifoId = fifoId) } def v2copy( nodePath: Seq[BaseNode] = nodePath, resources: Seq[Resource] = resources, name: Option[String] = setName, address: Seq[AddressSet] = address, regionType: RegionType.T = regionType, executable: Boolean = executable, fifoId: Option[Int] = fifoId, supports: TLMasterToSlaveTransferSizes = supports, emits: TLSlaveToMasterTransferSizes = emits, alwaysGrantsT: Boolean = alwaysGrantsT, mayDenyGet: Boolean = mayDenyGet, mayDenyPut: Boolean = mayDenyPut) = { new TLSlaveParameters( nodePath = nodePath, resources = resources, setName = name, address = address, regionType = regionType, executable = executable, fifoId = fifoId, supports = supports, emits = emits, alwaysGrantsT = alwaysGrantsT, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut) } @deprecated("Use v1copy instead of copy","") def copy( address: Seq[AddressSet] = address, resources: Seq[Resource] = resources, regionType: RegionType.T = regionType, executable: Boolean = executable, nodePath: Seq[BaseNode] = nodePath, supportsAcquireT: TransferSizes = supports.acquireT, supportsAcquireB: TransferSizes = supports.acquireB, supportsArithmetic: TransferSizes = supports.arithmetic, supportsLogical: TransferSizes = supports.logical, supportsGet: TransferSizes = supports.get, supportsPutFull: TransferSizes = supports.putFull, supportsPutPartial: TransferSizes = supports.putPartial, supportsHint: TransferSizes = supports.hint, mayDenyGet: Boolean = mayDenyGet, mayDenyPut: Boolean = mayDenyPut, alwaysGrantsT: Boolean = alwaysGrantsT, fifoId: Option[Int] = fifoId) = { v1copy( address = address, resources = resources, regionType = regionType, executable = executable, nodePath = nodePath, supportsAcquireT = supportsAcquireT, supportsAcquireB = supportsAcquireB, supportsArithmetic = supportsArithmetic, supportsLogical = supportsLogical, supportsGet = supportsGet, supportsPutFull = supportsPutFull, supportsPutPartial = supportsPutPartial, supportsHint = supportsHint, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut, alwaysGrantsT = alwaysGrantsT, fifoId = fifoId) } } object TLSlaveParameters { def v1( address: Seq[AddressSet], resources: Seq[Resource] = Seq(), regionType: RegionType.T = RegionType.GET_EFFECTS, executable: Boolean = false, nodePath: Seq[BaseNode] = Seq(), supportsAcquireT: TransferSizes = TransferSizes.none, supportsAcquireB: TransferSizes = TransferSizes.none, supportsArithmetic: TransferSizes = TransferSizes.none, supportsLogical: TransferSizes = TransferSizes.none, supportsGet: TransferSizes = TransferSizes.none, supportsPutFull: TransferSizes = TransferSizes.none, supportsPutPartial: TransferSizes = TransferSizes.none, 
supportsHint: TransferSizes = TransferSizes.none, mayDenyGet: Boolean = false, mayDenyPut: Boolean = false, alwaysGrantsT: Boolean = false, fifoId: Option[Int] = None) = { new TLSlaveParameters( setName = None, address = address, resources = resources, regionType = regionType, executable = executable, nodePath = nodePath, supports = TLMasterToSlaveTransferSizes( acquireT = supportsAcquireT, acquireB = supportsAcquireB, arithmetic = supportsArithmetic, logical = supportsLogical, get = supportsGet, putFull = supportsPutFull, putPartial = supportsPutPartial, hint = supportsHint), emits = TLSlaveToMasterTransferSizes.unknownEmits, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut, alwaysGrantsT = alwaysGrantsT, fifoId = fifoId) } def v2( address: Seq[AddressSet], nodePath: Seq[BaseNode] = Seq(), resources: Seq[Resource] = Seq(), name: Option[String] = None, regionType: RegionType.T = RegionType.GET_EFFECTS, executable: Boolean = false, fifoId: Option[Int] = None, supports: TLMasterToSlaveTransferSizes = TLMasterToSlaveTransferSizes.unknownSupports, emits: TLSlaveToMasterTransferSizes = TLSlaveToMasterTransferSizes.unknownEmits, alwaysGrantsT: Boolean = false, mayDenyGet: Boolean = false, mayDenyPut: Boolean = false) = { new TLSlaveParameters( nodePath = nodePath, resources = resources, setName = name, address = address, regionType = regionType, executable = executable, fifoId = fifoId, supports = supports, emits = emits, alwaysGrantsT = alwaysGrantsT, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut) } } object TLManagerParameters { @deprecated("Use TLSlaveParameters.v1 instead of TLManagerParameters","") def apply( address: Seq[AddressSet], resources: Seq[Resource] = Seq(), regionType: RegionType.T = RegionType.GET_EFFECTS, executable: Boolean = false, nodePath: Seq[BaseNode] = Seq(), supportsAcquireT: TransferSizes = TransferSizes.none, supportsAcquireB: TransferSizes = TransferSizes.none, supportsArithmetic: TransferSizes = TransferSizes.none, supportsLogical: TransferSizes = TransferSizes.none, supportsGet: TransferSizes = TransferSizes.none, supportsPutFull: TransferSizes = TransferSizes.none, supportsPutPartial: TransferSizes = TransferSizes.none, supportsHint: TransferSizes = TransferSizes.none, mayDenyGet: Boolean = false, mayDenyPut: Boolean = false, alwaysGrantsT: Boolean = false, fifoId: Option[Int] = None) = TLSlaveParameters.v1( address, resources, regionType, executable, nodePath, supportsAcquireT, supportsAcquireB, supportsArithmetic, supportsLogical, supportsGet, supportsPutFull, supportsPutPartial, supportsHint, mayDenyGet, mayDenyPut, alwaysGrantsT, fifoId, ) } case class TLChannelBeatBytes(a: Option[Int], b: Option[Int], c: Option[Int], d: Option[Int]) { def members = Seq(a, b, c, d) members.collect { case Some(beatBytes) => require (isPow2(beatBytes), "Data channel width must be a power of 2") } } object TLChannelBeatBytes{ def apply(beatBytes: Int): TLChannelBeatBytes = TLChannelBeatBytes( Some(beatBytes), Some(beatBytes), Some(beatBytes), Some(beatBytes)) def apply(): TLChannelBeatBytes = TLChannelBeatBytes( None, None, None, None) } class TLSlavePortParameters private( val slaves: Seq[TLSlaveParameters], val channelBytes: TLChannelBeatBytes, val endSinkId: Int, val minLatency: Int, val responseFields: Seq[BundleFieldBase], val requestKeys: Seq[BundleKeyBase]) extends SimpleProduct { def sortedSlaves = slaves.sortBy(_.sortedAddress.head) override def canEqual(that: Any): Boolean = that.isInstanceOf[TLSlavePortParameters] override def productPrefix = "TLSlavePortParameters" 
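  // As with TLSlaveParameters above (see "We intentionally omit nodePath"), the Product
  // methods below enumerate the constructor fields; SimpleProduct uses them for
  // equality testing and formatting of the port parameters.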
def productArity: Int = 6 def productElement(n: Int): Any = n match { case 0 => slaves case 1 => channelBytes case 2 => endSinkId case 3 => minLatency case 4 => responseFields case 5 => requestKeys case _ => throw new IndexOutOfBoundsException(n.toString) } require (!slaves.isEmpty, "Slave ports must have slaves") require (endSinkId >= 0, "Sink ids cannot be negative") require (minLatency >= 0, "Minimum required latency cannot be negative") // Using this API implies you cannot handle mixed-width busses def beatBytes = { channelBytes.members.foreach { width => require (width.isDefined && width == channelBytes.a) } channelBytes.a.get } // TODO this should be deprecated def managers = slaves def requireFifo(policy: TLFIFOFixer.Policy = TLFIFOFixer.allFIFO) = { val relevant = slaves.filter(m => policy(m)) relevant.foreach { m => require(m.fifoId == relevant.head.fifoId, s"${m.name} had fifoId ${m.fifoId}, which was not homogeneous (${slaves.map(s => (s.name, s.fifoId))}) ") } } // Bounds on required sizes def maxAddress = slaves.map(_.maxAddress).max def maxTransfer = slaves.map(_.maxTransfer).max def mayDenyGet = slaves.exists(_.mayDenyGet) def mayDenyPut = slaves.exists(_.mayDenyPut) // Diplomatically determined operation sizes emitted by all outward Slaves // as opposed to emits* which generate circuitry to check which specific addresses val allEmitClaims = slaves.map(_.emits).reduce( _ intersect _) // Operation Emitted by at least one outward Slaves // as opposed to emits* which generate circuitry to check which specific addresses val anyEmitClaims = slaves.map(_.emits).reduce(_ mincover _) // Diplomatically determined operation sizes supported by all outward Slaves // as opposed to supports* which generate circuitry to check which specific addresses val allSupportClaims = slaves.map(_.supports).reduce( _ intersect _) val allSupportAcquireT = allSupportClaims.acquireT val allSupportAcquireB = allSupportClaims.acquireB val allSupportArithmetic = allSupportClaims.arithmetic val allSupportLogical = allSupportClaims.logical val allSupportGet = allSupportClaims.get val allSupportPutFull = allSupportClaims.putFull val allSupportPutPartial = allSupportClaims.putPartial val allSupportHint = allSupportClaims.hint // Operation supported by at least one outward Slaves // as opposed to supports* which generate circuitry to check which specific addresses val anySupportClaims = slaves.map(_.supports).reduce(_ mincover _) val anySupportAcquireT = !anySupportClaims.acquireT.none val anySupportAcquireB = !anySupportClaims.acquireB.none val anySupportArithmetic = !anySupportClaims.arithmetic.none val anySupportLogical = !anySupportClaims.logical.none val anySupportGet = !anySupportClaims.get.none val anySupportPutFull = !anySupportClaims.putFull.none val anySupportPutPartial = !anySupportClaims.putPartial.none val anySupportHint = !anySupportClaims.hint.none // Supporting Acquire means being routable for GrantAck require ((endSinkId == 0) == !anySupportAcquireB) // These return Option[TLSlaveParameters] for your convenience def find(address: BigInt) = slaves.find(_.address.exists(_.contains(address))) // The safe version will check the entire address def findSafe(address: UInt) = VecInit(sortedSlaves.map(_.address.map(_.contains(address)).reduce(_ || _))) // The fast version assumes the address is valid (you probably want fastProperty instead of this function) def findFast(address: UInt) = { val routingMask = AddressDecoder(slaves.map(_.address)) 
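    // AddressDecoder picks a subset of address bits sufficient to tell the slaves'
    // address sets apart; widening each AddressSet by ~routingMask below treats every
    // other bit as don't-care, so the generated comparators only examine the routing
    // bits. That is what makes the fast lookup cheaper than findSafe, at the cost of
    // assuming the presented address is legal.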
VecInit(sortedSlaves.map(_.address.map(_.widen(~routingMask)).distinct.map(_.contains(address)).reduce(_ || _))) } // Compute the simplest AddressSets that decide a key def fastPropertyGroup[K](p: TLSlaveParameters => K): Seq[(K, Seq[AddressSet])] = { val groups = groupByIntoSeq(sortedSlaves.map(m => (p(m), m.address)))( _._1).map { case (k, vs) => k -> vs.flatMap(_._2) } val reductionMask = AddressDecoder(groups.map(_._2)) groups.map { case (k, seq) => k -> AddressSet.unify(seq.map(_.widen(~reductionMask)).distinct) } } // Select a property def fastProperty[K, D <: Data](address: UInt, p: TLSlaveParameters => K, d: K => D): D = Mux1H(fastPropertyGroup(p).map { case (v, a) => (a.map(_.contains(address)).reduce(_||_), d(v)) }) // Note: returns the actual fifoId + 1 or 0 if None def findFifoIdFast(address: UInt) = fastProperty(address, _.fifoId.map(_+1).getOrElse(0), (i:Int) => i.U) def hasFifoIdFast(address: UInt) = fastProperty(address, _.fifoId.isDefined, (b:Boolean) => b.B) // Does this Port manage this ID/address? def containsSafe(address: UInt) = findSafe(address).reduce(_ || _) private def addressHelper( // setting safe to false indicates that all addresses are expected to be legal, which might reduce circuit complexity safe: Boolean, // member filters out the sizes being checked based on the opcode being emitted or supported member: TLSlaveParameters => TransferSizes, address: UInt, lgSize: UInt, // range provides a limit on the sizes that are expected to be evaluated, which might reduce circuit complexity range: Option[TransferSizes]): Bool = { // trim reduces circuit complexity by intersecting checked sizes with the range argument def trim(x: TransferSizes) = range.map(_.intersect(x)).getOrElse(x) // groupBy returns an unordered map, convert back to Seq and sort the result for determinism // groupByIntoSeq is turning slaves into trimmed membership sizes // We are grouping all the slaves by their transfer size where // if they support the trimmed size then // member is the type of transfer that you are looking for (What you are trying to filter on) // When you consider membership, you are trimming the sizes to only the ones that you care about // you are filtering the slaves based on both whether they support a particular opcode and the size // Grouping the slaves based on the actual transfer size range they support // intersecting the range and checking their membership // FOR SUPPORTCASES instead of returning the list of slaves, // you are returning a map from transfer size to the set of // address sets that are supported for that transfer size // find all the slaves that support a certain type of operation and then group their addresses by the supported size // for every size there could be multiple address ranges // safety is a trade off between checking between all possible addresses vs only the addresses // that are known to have supported sizes // the trade off is 'checking all addresses is a more expensive circuit but will always give you // the right answer even if you give it an illegal address' // the not safe version is a cheaper circuit but if you give it an illegal address then it might produce the wrong answer // fast presumes address legality // This groupByIntoSeq deterministically groups all address sets for which a given `member` transfer size applies. // In the resulting Map of cases, the keys are transfer sizes and the values are all address sets which emit or support that size. 
val supportCases = groupByIntoSeq(slaves)(m => trim(member(m))).map { case (k: TransferSizes, vs: Seq[TLSlaveParameters]) => k -> vs.flatMap(_.address) } // safe produces a circuit that compares against all possible addresses, // whereas fast presumes that the address is legal but uses an efficient address decoder val mask = if (safe) ~BigInt(0) else AddressDecoder(supportCases.map(_._2)) // Simplified creates the most concise possible representation of each cases' address sets based on the mask. val simplified = supportCases.map { case (k, seq) => k -> AddressSet.unify(seq.map(_.widen(~mask)).distinct) } simplified.map { case (s, a) => // s is a size, you are checking for this size either the size of the operation is in s // We return an or-reduction of all the cases, checking whether any contains both the dynamic size and dynamic address on the wire. ((Some(s) == range).B || s.containsLg(lgSize)) && a.map(_.contains(address)).reduce(_||_) }.foldLeft(false.B)(_||_) } def supportsAcquireTSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.acquireT, address, lgSize, range) def supportsAcquireBSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.acquireB, address, lgSize, range) def supportsArithmeticSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.arithmetic, address, lgSize, range) def supportsLogicalSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.logical, address, lgSize, range) def supportsGetSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.get, address, lgSize, range) def supportsPutFullSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.putFull, address, lgSize, range) def supportsPutPartialSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.putPartial, address, lgSize, range) def supportsHintSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.hint, address, lgSize, range) def supportsAcquireTFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.acquireT, address, lgSize, range) def supportsAcquireBFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.acquireB, address, lgSize, range) def supportsArithmeticFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.arithmetic, address, lgSize, range) def supportsLogicalFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.logical, address, lgSize, range) def supportsGetFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.get, address, lgSize, range) def supportsPutFullFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.putFull, address, lgSize, range) def supportsPutPartialFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.putPartial, address, lgSize, range) def supportsHintFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.hint, address, lgSize, range) def emitsProbeSafe (address: UInt, lgSize: 
UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.probe, address, lgSize, range) def emitsArithmeticSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.arithmetic, address, lgSize, range) def emitsLogicalSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.logical, address, lgSize, range) def emitsGetSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.get, address, lgSize, range) def emitsPutFullSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.putFull, address, lgSize, range) def emitsPutPartialSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.putPartial, address, lgSize, range) def emitsHintSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.hint, address, lgSize, range) def findTreeViolation() = slaves.flatMap(_.findTreeViolation()).headOption def isTree = !slaves.exists(!_.isTree) def infoString = "Slave Port Beatbytes = " + beatBytes + "\n" + "Slave Port MinLatency = " + minLatency + "\n\n" + slaves.map(_.infoString).mkString def v1copy( managers: Seq[TLSlaveParameters] = slaves, beatBytes: Int = -1, endSinkId: Int = endSinkId, minLatency: Int = minLatency, responseFields: Seq[BundleFieldBase] = responseFields, requestKeys: Seq[BundleKeyBase] = requestKeys) = { new TLSlavePortParameters( slaves = managers, channelBytes = if (beatBytes != -1) TLChannelBeatBytes(beatBytes) else channelBytes, endSinkId = endSinkId, minLatency = minLatency, responseFields = responseFields, requestKeys = requestKeys) } def v2copy( slaves: Seq[TLSlaveParameters] = slaves, channelBytes: TLChannelBeatBytes = channelBytes, endSinkId: Int = endSinkId, minLatency: Int = minLatency, responseFields: Seq[BundleFieldBase] = responseFields, requestKeys: Seq[BundleKeyBase] = requestKeys) = { new TLSlavePortParameters( slaves = slaves, channelBytes = channelBytes, endSinkId = endSinkId, minLatency = minLatency, responseFields = responseFields, requestKeys = requestKeys) } @deprecated("Use v1copy instead of copy","") def copy( managers: Seq[TLSlaveParameters] = slaves, beatBytes: Int = -1, endSinkId: Int = endSinkId, minLatency: Int = minLatency, responseFields: Seq[BundleFieldBase] = responseFields, requestKeys: Seq[BundleKeyBase] = requestKeys) = { v1copy( managers, beatBytes, endSinkId, minLatency, responseFields, requestKeys) } } object TLSlavePortParameters { def v1( managers: Seq[TLSlaveParameters], beatBytes: Int, endSinkId: Int = 0, minLatency: Int = 0, responseFields: Seq[BundleFieldBase] = Nil, requestKeys: Seq[BundleKeyBase] = Nil) = { new TLSlavePortParameters( slaves = managers, channelBytes = TLChannelBeatBytes(beatBytes), endSinkId = endSinkId, minLatency = minLatency, responseFields = responseFields, requestKeys = requestKeys) } } object TLManagerPortParameters { @deprecated("Use TLSlavePortParameters.v1 instead of TLManagerPortParameters","") def apply( managers: Seq[TLSlaveParameters], beatBytes: Int, endSinkId: Int = 0, minLatency: Int = 0, responseFields: Seq[BundleFieldBase] = Nil, requestKeys: Seq[BundleKeyBase] = Nil) = { TLSlavePortParameters.v1( managers, beatBytes, endSinkId, minLatency, responseFields, requestKeys) } } class TLMasterParameters private( val nodePath: Seq[BaseNode], val resources: Seq[Resource], val name: String, val visibility: Seq[AddressSet], val 
unusedRegionTypes: Set[RegionType.T], val executesOnly: Boolean, val requestFifo: Boolean, // only a request, not a requirement. applies to A, not C. val supports: TLSlaveToMasterTransferSizes, val emits: TLMasterToSlaveTransferSizes, val neverReleasesData: Boolean, val sourceId: IdRange) extends SimpleProduct { override def canEqual(that: Any): Boolean = that.isInstanceOf[TLMasterParameters] override def productPrefix = "TLMasterParameters" // We intentionally omit nodePath for equality testing / formatting def productArity: Int = 10 def productElement(n: Int): Any = n match { case 0 => name case 1 => sourceId case 2 => resources case 3 => visibility case 4 => unusedRegionTypes case 5 => executesOnly case 6 => requestFifo case 7 => supports case 8 => emits case 9 => neverReleasesData case _ => throw new IndexOutOfBoundsException(n.toString) } require (!sourceId.isEmpty) require (!visibility.isEmpty) require (supports.putFull.contains(supports.putPartial)) // We only support these operations if we support Probe (ie: we're a cache) require (supports.probe.contains(supports.arithmetic)) require (supports.probe.contains(supports.logical)) require (supports.probe.contains(supports.get)) require (supports.probe.contains(supports.putFull)) require (supports.probe.contains(supports.putPartial)) require (supports.probe.contains(supports.hint)) visibility.combinations(2).foreach { case Seq(x,y) => require (!x.overlaps(y), s"$x and $y overlap.") } val maxTransfer = List( supports.probe.max, supports.arithmetic.max, supports.logical.max, supports.get.max, supports.putFull.max, supports.putPartial.max).max def infoString = { s"""Master Name = ${name} |visibility = ${visibility} |emits = ${emits.infoString} |sourceId = ${sourceId} | |""".stripMargin } def v1copy( name: String = name, sourceId: IdRange = sourceId, nodePath: Seq[BaseNode] = nodePath, requestFifo: Boolean = requestFifo, visibility: Seq[AddressSet] = visibility, supportsProbe: TransferSizes = supports.probe, supportsArithmetic: TransferSizes = supports.arithmetic, supportsLogical: TransferSizes = supports.logical, supportsGet: TransferSizes = supports.get, supportsPutFull: TransferSizes = supports.putFull, supportsPutPartial: TransferSizes = supports.putPartial, supportsHint: TransferSizes = supports.hint) = { new TLMasterParameters( nodePath = nodePath, resources = this.resources, name = name, visibility = visibility, unusedRegionTypes = this.unusedRegionTypes, executesOnly = this.executesOnly, requestFifo = requestFifo, supports = TLSlaveToMasterTransferSizes( probe = supportsProbe, arithmetic = supportsArithmetic, logical = supportsLogical, get = supportsGet, putFull = supportsPutFull, putPartial = supportsPutPartial, hint = supportsHint), emits = this.emits, neverReleasesData = this.neverReleasesData, sourceId = sourceId) } def v2copy( nodePath: Seq[BaseNode] = nodePath, resources: Seq[Resource] = resources, name: String = name, visibility: Seq[AddressSet] = visibility, unusedRegionTypes: Set[RegionType.T] = unusedRegionTypes, executesOnly: Boolean = executesOnly, requestFifo: Boolean = requestFifo, supports: TLSlaveToMasterTransferSizes = supports, emits: TLMasterToSlaveTransferSizes = emits, neverReleasesData: Boolean = neverReleasesData, sourceId: IdRange = sourceId) = { new TLMasterParameters( nodePath = nodePath, resources = resources, name = name, visibility = visibility, unusedRegionTypes = unusedRegionTypes, executesOnly = executesOnly, requestFifo = requestFifo, supports = supports, emits = emits, neverReleasesData = 
neverReleasesData, sourceId = sourceId) } @deprecated("Use v1copy instead of copy","") def copy( name: String = name, sourceId: IdRange = sourceId, nodePath: Seq[BaseNode] = nodePath, requestFifo: Boolean = requestFifo, visibility: Seq[AddressSet] = visibility, supportsProbe: TransferSizes = supports.probe, supportsArithmetic: TransferSizes = supports.arithmetic, supportsLogical: TransferSizes = supports.logical, supportsGet: TransferSizes = supports.get, supportsPutFull: TransferSizes = supports.putFull, supportsPutPartial: TransferSizes = supports.putPartial, supportsHint: TransferSizes = supports.hint) = { v1copy( name = name, sourceId = sourceId, nodePath = nodePath, requestFifo = requestFifo, visibility = visibility, supportsProbe = supportsProbe, supportsArithmetic = supportsArithmetic, supportsLogical = supportsLogical, supportsGet = supportsGet, supportsPutFull = supportsPutFull, supportsPutPartial = supportsPutPartial, supportsHint = supportsHint) } } object TLMasterParameters { def v1( name: String, sourceId: IdRange = IdRange(0,1), nodePath: Seq[BaseNode] = Seq(), requestFifo: Boolean = false, visibility: Seq[AddressSet] = Seq(AddressSet(0, ~0)), supportsProbe: TransferSizes = TransferSizes.none, supportsArithmetic: TransferSizes = TransferSizes.none, supportsLogical: TransferSizes = TransferSizes.none, supportsGet: TransferSizes = TransferSizes.none, supportsPutFull: TransferSizes = TransferSizes.none, supportsPutPartial: TransferSizes = TransferSizes.none, supportsHint: TransferSizes = TransferSizes.none) = { new TLMasterParameters( nodePath = nodePath, resources = Nil, name = name, visibility = visibility, unusedRegionTypes = Set(), executesOnly = false, requestFifo = requestFifo, supports = TLSlaveToMasterTransferSizes( probe = supportsProbe, arithmetic = supportsArithmetic, logical = supportsLogical, get = supportsGet, putFull = supportsPutFull, putPartial = supportsPutPartial, hint = supportsHint), emits = TLMasterToSlaveTransferSizes.unknownEmits, neverReleasesData = false, sourceId = sourceId) } def v2( nodePath: Seq[BaseNode] = Seq(), resources: Seq[Resource] = Nil, name: String, visibility: Seq[AddressSet] = Seq(AddressSet(0, ~0)), unusedRegionTypes: Set[RegionType.T] = Set(), executesOnly: Boolean = false, requestFifo: Boolean = false, supports: TLSlaveToMasterTransferSizes = TLSlaveToMasterTransferSizes.unknownSupports, emits: TLMasterToSlaveTransferSizes = TLMasterToSlaveTransferSizes.unknownEmits, neverReleasesData: Boolean = false, sourceId: IdRange = IdRange(0,1)) = { new TLMasterParameters( nodePath = nodePath, resources = resources, name = name, visibility = visibility, unusedRegionTypes = unusedRegionTypes, executesOnly = executesOnly, requestFifo = requestFifo, supports = supports, emits = emits, neverReleasesData = neverReleasesData, sourceId = sourceId) } } object TLClientParameters { @deprecated("Use TLMasterParameters.v1 instead of TLClientParameters","") def apply( name: String, sourceId: IdRange = IdRange(0,1), nodePath: Seq[BaseNode] = Seq(), requestFifo: Boolean = false, visibility: Seq[AddressSet] = Seq(AddressSet.everything), supportsProbe: TransferSizes = TransferSizes.none, supportsArithmetic: TransferSizes = TransferSizes.none, supportsLogical: TransferSizes = TransferSizes.none, supportsGet: TransferSizes = TransferSizes.none, supportsPutFull: TransferSizes = TransferSizes.none, supportsPutPartial: TransferSizes = TransferSizes.none, supportsHint: TransferSizes = TransferSizes.none) = { TLMasterParameters.v1( name = name, sourceId = sourceId, 
nodePath = nodePath, requestFifo = requestFifo, visibility = visibility, supportsProbe = supportsProbe, supportsArithmetic = supportsArithmetic, supportsLogical = supportsLogical, supportsGet = supportsGet, supportsPutFull = supportsPutFull, supportsPutPartial = supportsPutPartial, supportsHint = supportsHint) } } class TLMasterPortParameters private( val masters: Seq[TLMasterParameters], val channelBytes: TLChannelBeatBytes, val minLatency: Int, val echoFields: Seq[BundleFieldBase], val requestFields: Seq[BundleFieldBase], val responseKeys: Seq[BundleKeyBase]) extends SimpleProduct { override def canEqual(that: Any): Boolean = that.isInstanceOf[TLMasterPortParameters] override def productPrefix = "TLMasterPortParameters" def productArity: Int = 6 def productElement(n: Int): Any = n match { case 0 => masters case 1 => channelBytes case 2 => minLatency case 3 => echoFields case 4 => requestFields case 5 => responseKeys case _ => throw new IndexOutOfBoundsException(n.toString) } require (!masters.isEmpty) require (minLatency >= 0) def clients = masters // Require disjoint ranges for Ids IdRange.overlaps(masters.map(_.sourceId)).foreach { case (x, y) => require (!x.overlaps(y), s"TLClientParameters.sourceId ${x} overlaps ${y}") } // Bounds on required sizes def endSourceId = masters.map(_.sourceId.end).max def maxTransfer = masters.map(_.maxTransfer).max // The unused sources < endSourceId def unusedSources: Seq[Int] = { val usedSources = masters.map(_.sourceId).sortBy(_.start) ((Seq(0) ++ usedSources.map(_.end)) zip usedSources.map(_.start)) flatMap { case (end, start) => end until start } } // Diplomatically determined operation sizes emitted by all inward Masters // as opposed to emits* which generate circuitry to check which specific addresses val allEmitClaims = masters.map(_.emits).reduce( _ intersect _) // Diplomatically determined operation sizes Emitted by at least one inward Masters // as opposed to emits* which generate circuitry to check which specific addresses val anyEmitClaims = masters.map(_.emits).reduce(_ mincover _) // Diplomatically determined operation sizes supported by all inward Masters // as opposed to supports* which generate circuitry to check which specific addresses val allSupportProbe = masters.map(_.supports.probe) .reduce(_ intersect _) val allSupportArithmetic = masters.map(_.supports.arithmetic).reduce(_ intersect _) val allSupportLogical = masters.map(_.supports.logical) .reduce(_ intersect _) val allSupportGet = masters.map(_.supports.get) .reduce(_ intersect _) val allSupportPutFull = masters.map(_.supports.putFull) .reduce(_ intersect _) val allSupportPutPartial = masters.map(_.supports.putPartial).reduce(_ intersect _) val allSupportHint = masters.map(_.supports.hint) .reduce(_ intersect _) // Diplomatically determined operation sizes supported by at least one master // as opposed to supports* which generate circuitry to check which specific addresses val anySupportProbe = masters.map(!_.supports.probe.none) .reduce(_ || _) val anySupportArithmetic = masters.map(!_.supports.arithmetic.none).reduce(_ || _) val anySupportLogical = masters.map(!_.supports.logical.none) .reduce(_ || _) val anySupportGet = masters.map(!_.supports.get.none) .reduce(_ || _) val anySupportPutFull = masters.map(!_.supports.putFull.none) .reduce(_ || _) val anySupportPutPartial = masters.map(!_.supports.putPartial.none).reduce(_ || _) val anySupportHint = masters.map(!_.supports.hint.none) .reduce(_ || _) // These return Option[TLMasterParameters] for your convenience def find(id: 
Int) = masters.find(_.sourceId.contains(id)) // Synthesizable lookup methods def find(id: UInt) = VecInit(masters.map(_.sourceId.contains(id))) def contains(id: UInt) = find(id).reduce(_ || _) def requestFifo(id: UInt) = Mux1H(find(id), masters.map(c => c.requestFifo.B)) // Available during RTL runtime, checks to see if (id, size) is supported by the master's (client's) diplomatic parameters private def sourceIdHelper(member: TLMasterParameters => TransferSizes)(id: UInt, lgSize: UInt) = { val allSame = masters.map(member(_) == member(masters(0))).reduce(_ && _) // this if statement is a coarse generalization of the groupBy in the sourceIdHelper2 version; // the case where there is only one group. if (allSame) member(masters(0)).containsLg(lgSize) else { // Find the master associated with ID and returns whether that particular master is able to receive transaction of lgSize Mux1H(find(id), masters.map(member(_).containsLg(lgSize))) } } // Check for support of a given operation at a specific id val supportsProbe = sourceIdHelper(_.supports.probe) _ val supportsArithmetic = sourceIdHelper(_.supports.arithmetic) _ val supportsLogical = sourceIdHelper(_.supports.logical) _ val supportsGet = sourceIdHelper(_.supports.get) _ val supportsPutFull = sourceIdHelper(_.supports.putFull) _ val supportsPutPartial = sourceIdHelper(_.supports.putPartial) _ val supportsHint = sourceIdHelper(_.supports.hint) _ // TODO: Merge sourceIdHelper2 with sourceIdHelper private def sourceIdHelper2( member: TLMasterParameters => TransferSizes, sourceId: UInt, lgSize: UInt): Bool = { // Because sourceIds are uniquely owned by each master, we use them to group the // cases that have to be checked. val emitCases = groupByIntoSeq(masters)(m => member(m)).map { case (k, vs) => k -> vs.map(_.sourceId) } emitCases.map { case (s, a) => (s.containsLg(lgSize)) && a.map(_.contains(sourceId)).reduce(_||_) }.foldLeft(false.B)(_||_) } // Check for emit of a given operation at a specific id def emitsAcquireT (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.acquireT, sourceId, lgSize) def emitsAcquireB (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.acquireB, sourceId, lgSize) def emitsArithmetic(sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.arithmetic, sourceId, lgSize) def emitsLogical (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.logical, sourceId, lgSize) def emitsGet (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.get, sourceId, lgSize) def emitsPutFull (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.putFull, sourceId, lgSize) def emitsPutPartial(sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.putPartial, sourceId, lgSize) def emitsHint (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.hint, sourceId, lgSize) def infoString = masters.map(_.infoString).mkString def v1copy( clients: Seq[TLMasterParameters] = masters, minLatency: Int = minLatency, echoFields: Seq[BundleFieldBase] = echoFields, requestFields: Seq[BundleFieldBase] = requestFields, responseKeys: Seq[BundleKeyBase] = responseKeys) = { new TLMasterPortParameters( masters = clients, channelBytes = channelBytes, minLatency = minLatency, echoFields = echoFields, requestFields = requestFields, responseKeys = responseKeys) } def v2copy( masters: Seq[TLMasterParameters] = masters, channelBytes: TLChannelBeatBytes = channelBytes, minLatency: Int = minLatency, echoFields: Seq[BundleFieldBase] = echoFields, requestFields: Seq[BundleFieldBase] = requestFields, responseKeys: Seq[BundleKeyBase] = responseKeys) = 
{ new TLMasterPortParameters( masters = masters, channelBytes = channelBytes, minLatency = minLatency, echoFields = echoFields, requestFields = requestFields, responseKeys = responseKeys) } @deprecated("Use v1copy instead of copy","") def copy( clients: Seq[TLMasterParameters] = masters, minLatency: Int = minLatency, echoFields: Seq[BundleFieldBase] = echoFields, requestFields: Seq[BundleFieldBase] = requestFields, responseKeys: Seq[BundleKeyBase] = responseKeys) = { v1copy( clients, minLatency, echoFields, requestFields, responseKeys) } } object TLClientPortParameters { @deprecated("Use TLMasterPortParameters.v1 instead of TLClientPortParameters","") def apply( clients: Seq[TLMasterParameters], minLatency: Int = 0, echoFields: Seq[BundleFieldBase] = Nil, requestFields: Seq[BundleFieldBase] = Nil, responseKeys: Seq[BundleKeyBase] = Nil) = { TLMasterPortParameters.v1( clients, minLatency, echoFields, requestFields, responseKeys) } } object TLMasterPortParameters { def v1( clients: Seq[TLMasterParameters], minLatency: Int = 0, echoFields: Seq[BundleFieldBase] = Nil, requestFields: Seq[BundleFieldBase] = Nil, responseKeys: Seq[BundleKeyBase] = Nil) = { new TLMasterPortParameters( masters = clients, channelBytes = TLChannelBeatBytes(), minLatency = minLatency, echoFields = echoFields, requestFields = requestFields, responseKeys = responseKeys) } def v2( masters: Seq[TLMasterParameters], channelBytes: TLChannelBeatBytes = TLChannelBeatBytes(), minLatency: Int = 0, echoFields: Seq[BundleFieldBase] = Nil, requestFields: Seq[BundleFieldBase] = Nil, responseKeys: Seq[BundleKeyBase] = Nil) = { new TLMasterPortParameters( masters = masters, channelBytes = channelBytes, minLatency = minLatency, echoFields = echoFields, requestFields = requestFields, responseKeys = responseKeys) } } case class TLBundleParameters( addressBits: Int, dataBits: Int, sourceBits: Int, sinkBits: Int, sizeBits: Int, echoFields: Seq[BundleFieldBase], requestFields: Seq[BundleFieldBase], responseFields: Seq[BundleFieldBase], hasBCE: Boolean) { // Chisel has issues with 0-width wires require (addressBits >= 1) require (dataBits >= 8) require (sourceBits >= 1) require (sinkBits >= 1) require (sizeBits >= 1) require (isPow2(dataBits)) echoFields.foreach { f => require (f.key.isControl, s"${f} is not a legal echo field") } val addrLoBits = log2Up(dataBits/8) // Used to uniquify bus IP names def shortName = s"a${addressBits}d${dataBits}s${sourceBits}k${sinkBits}z${sizeBits}" + (if (hasBCE) "c" else "u") def union(x: TLBundleParameters) = TLBundleParameters( max(addressBits, x.addressBits), max(dataBits, x.dataBits), max(sourceBits, x.sourceBits), max(sinkBits, x.sinkBits), max(sizeBits, x.sizeBits), echoFields = BundleField.union(echoFields ++ x.echoFields), requestFields = BundleField.union(requestFields ++ x.requestFields), responseFields = BundleField.union(responseFields ++ x.responseFields), hasBCE || x.hasBCE) } object TLBundleParameters { val emptyBundleParams = TLBundleParameters( addressBits = 1, dataBits = 8, sourceBits = 1, sinkBits = 1, sizeBits = 1, echoFields = Nil, requestFields = Nil, responseFields = Nil, hasBCE = false) def union(x: Seq[TLBundleParameters]) = x.foldLeft(emptyBundleParams)((x,y) => x.union(y)) def apply(master: TLMasterPortParameters, slave: TLSlavePortParameters) = new TLBundleParameters( addressBits = log2Up(slave.maxAddress + 1), dataBits = slave.beatBytes * 8, sourceBits = log2Up(master.endSourceId), sinkBits = log2Up(slave.endSinkId), sizeBits = log2Up(log2Ceil(max(master.maxTransfer, 
slave.maxTransfer))+1), echoFields = master.echoFields, requestFields = BundleField.accept(master.requestFields, slave.requestKeys), responseFields = BundleField.accept(slave.responseFields, master.responseKeys), hasBCE = master.anySupportProbe && slave.anySupportAcquireB) } case class TLEdgeParameters( master: TLMasterPortParameters, slave: TLSlavePortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge { // legacy names: def manager = slave def client = master val maxTransfer = max(master.maxTransfer, slave.maxTransfer) val maxLgSize = log2Ceil(maxTransfer) // Sanity check the link... require (maxTransfer >= slave.beatBytes, s"Link's max transfer (${maxTransfer}) < ${slave.slaves.map(_.name)}'s beatBytes (${slave.beatBytes})") def diplomaticClaimsMasterToSlave = master.anyEmitClaims.intersect(slave.anySupportClaims) val bundle = TLBundleParameters(master, slave) def formatEdge = master.infoString + "\n" + slave.infoString } case class TLCreditedDelay( a: CreditedDelay, b: CreditedDelay, c: CreditedDelay, d: CreditedDelay, e: CreditedDelay) { def + (that: TLCreditedDelay): TLCreditedDelay = TLCreditedDelay( a = a + that.a, b = b + that.b, c = c + that.c, d = d + that.d, e = e + that.e) override def toString = s"(${a}, ${b}, ${c}, ${d}, ${e})" } object TLCreditedDelay { def apply(delay: CreditedDelay): TLCreditedDelay = apply(delay, delay.flip, delay, delay.flip, delay) } case class TLCreditedManagerPortParameters(delay: TLCreditedDelay, base: TLSlavePortParameters) {def infoString = base.infoString} case class TLCreditedClientPortParameters(delay: TLCreditedDelay, base: TLMasterPortParameters) {def infoString = base.infoString} case class TLCreditedEdgeParameters(client: TLCreditedClientPortParameters, manager: TLCreditedManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge { val delay = client.delay + manager.delay val bundle = TLBundleParameters(client.base, manager.base) def formatEdge = client.infoString + "\n" + manager.infoString } case class TLAsyncManagerPortParameters(async: AsyncQueueParams, base: TLSlavePortParameters) {def infoString = base.infoString} case class TLAsyncClientPortParameters(base: TLMasterPortParameters) {def infoString = base.infoString} case class TLAsyncBundleParameters(async: AsyncQueueParams, base: TLBundleParameters) case class TLAsyncEdgeParameters(client: TLAsyncClientPortParameters, manager: TLAsyncManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge { val bundle = TLAsyncBundleParameters(manager.async, TLBundleParameters(client.base, manager.base)) def formatEdge = client.infoString + "\n" + manager.infoString } case class TLRationalManagerPortParameters(direction: RationalDirection, base: TLSlavePortParameters) {def infoString = base.infoString} case class TLRationalClientPortParameters(base: TLMasterPortParameters) {def infoString = base.infoString} case class TLRationalEdgeParameters(client: TLRationalClientPortParameters, manager: TLRationalManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge { val bundle = TLBundleParameters(client.base, manager.base) def formatEdge = client.infoString + "\n" + manager.infoString } // To be unified, devices must agree on all of these terms case class ManagerUnificationKey( resources: Seq[Resource], regionType: RegionType.T, executable: Boolean, supportsAcquireT: TransferSizes, supportsAcquireB: TransferSizes, supportsArithmetic: TransferSizes, supportsLogical: TransferSizes, supportsGet: 
TransferSizes, supportsPutFull: TransferSizes, supportsPutPartial: TransferSizes, supportsHint: TransferSizes) object ManagerUnificationKey { def apply(x: TLSlaveParameters): ManagerUnificationKey = ManagerUnificationKey( resources = x.resources, regionType = x.regionType, executable = x.executable, supportsAcquireT = x.supportsAcquireT, supportsAcquireB = x.supportsAcquireB, supportsArithmetic = x.supportsArithmetic, supportsLogical = x.supportsLogical, supportsGet = x.supportsGet, supportsPutFull = x.supportsPutFull, supportsPutPartial = x.supportsPutPartial, supportsHint = x.supportsHint) } object ManagerUnification { def apply(slaves: Seq[TLSlaveParameters]): List[TLSlaveParameters] = { slaves.groupBy(ManagerUnificationKey.apply).values.map { seq => val agree = seq.forall(_.fifoId == seq.head.fifoId) seq(0).v1copy( address = AddressSet.unify(seq.flatMap(_.address)), fifoId = if (agree) seq(0).fifoId else None) }.toList } } case class TLBufferParams( a: BufferParams = BufferParams.none, b: BufferParams = BufferParams.none, c: BufferParams = BufferParams.none, d: BufferParams = BufferParams.none, e: BufferParams = BufferParams.none ) extends DirectedBuffers[TLBufferParams] { def copyIn(x: BufferParams) = this.copy(b = x, d = x) def copyOut(x: BufferParams) = this.copy(a = x, c = x, e = x) def copyInOut(x: BufferParams) = this.copyIn(x).copyOut(x) } /** Pretty printing of TL source id maps */ class TLSourceIdMap(tl: TLMasterPortParameters) extends IdMap[TLSourceIdMapEntry] { private val tlDigits = String.valueOf(tl.endSourceId-1).length() protected val fmt = s"\t[%${tlDigits}d, %${tlDigits}d) %s%s%s" private val sorted = tl.masters.sortBy(_.sourceId) val mapping: Seq[TLSourceIdMapEntry] = sorted.map { case c => TLSourceIdMapEntry(c.sourceId, c.name, c.supports.probe, c.requestFifo) } } case class TLSourceIdMapEntry(tlId: IdRange, name: String, isCache: Boolean, requestFifo: Boolean) extends IdMapEntry { val from = tlId val to = tlId val maxTransactionsInFlight = Some(tlId.size) } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? 
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there exist C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there exist D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC
=> mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? 
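// For example (summarizing the lookup below): PutFullData, PutPartialData, ArithmeticData,
// LogicalData, and PREFETCH_WRITE hints always need T (write) permission; Get and
// PREFETCH_READ hints never do; AcquireBlock/AcquirePerm need it only when growing to NtoT or BtoT.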
def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size 
:= lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = 
Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address 
:= toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def 
Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := 
mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
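For reference, a minimal usage sketch of the TLEdgeOut helpers above (an illustration only, not one of the source files to translate): a hypothetical non-caching client that issues a single beat-sized Get and sinks the AccessAckData. The module and signal names (ExampleGetClient, sent) are assumptions, and wiring the node into a bus is omitted.

import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._

class ExampleGetClient(implicit p: Parameters) extends LazyModule {
  // One non-caching master with a single source ID.
  val node = TLClientNode(Seq(TLMasterPortParameters.v1(
    Seq(TLMasterParameters.v1(name = "exampleGetClient", sourceId = IdRange(0, 1))))))
  lazy val module = new LazyModuleImp(this) {
    val (out, edge) = node.out(0)
    // Get returns (legal, bits): `legal` is the diplomatic support check,
    // `getBits` is a fully formed A-channel payload.
    val (legal, getBits) = edge.Get(
      fromSource = 0.U,
      toAddress  = 0x1000.U,
      lgSize     = log2Ceil(edge.manager.beatBytes).U)
    val sent = RegInit(false.B)
    out.a.valid := !sent && legal
    out.a.bits  := getBits
    when (out.a.fire) { sent := true.B }
    // Sink the AccessAckData; firstlast tracks the beats of a possibly multi-beat response.
    val (_, _, d_done) = edge.firstlast(out.d)
    out.d.ready := true.B
    // Tie off channels unused by a non-caching client.
    out.b.ready := true.B
    out.c.valid := false.B
    out.c.bits  := DontCare
    out.e.valid := false.B
    out.e.bits  := DontCare
  }
}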
module MSHR_8( // @[NBDcache.scala:150:7] input clock, // @[NBDcache.scala:150:7] input reset, // @[NBDcache.scala:150:7] input io_req_pri_val, // @[NBDcache.scala:151:14] output io_req_pri_rdy, // @[NBDcache.scala:151:14] input io_req_sec_val, // @[NBDcache.scala:151:14] output io_req_sec_rdy, // @[NBDcache.scala:151:14] input [39:0] io_req_bits_addr, // @[NBDcache.scala:151:14] input [6:0] io_req_bits_tag, // @[NBDcache.scala:151:14] input [4:0] io_req_bits_cmd, // @[NBDcache.scala:151:14] input [1:0] io_req_bits_size, // @[NBDcache.scala:151:14] input io_req_bits_signed, // @[NBDcache.scala:151:14] input [1:0] io_req_bits_dprv, // @[NBDcache.scala:151:14] input io_req_bits_dv, // @[NBDcache.scala:151:14] input io_req_bits_phys, // @[NBDcache.scala:151:14] input io_req_bits_no_resp, // @[NBDcache.scala:151:14] input io_req_bits_no_alloc, // @[NBDcache.scala:151:14] input io_req_bits_no_xcpt, // @[NBDcache.scala:151:14] input [4:0] io_req_bits_sdq_id, // @[NBDcache.scala:151:14] input io_req_bits_tag_match, // @[NBDcache.scala:151:14] input [1:0] io_req_bits_old_meta_coh_state, // @[NBDcache.scala:151:14] input [19:0] io_req_bits_old_meta_tag, // @[NBDcache.scala:151:14] input [7:0] io_req_bits_way_en, // @[NBDcache.scala:151:14] output io_idx_match, // @[NBDcache.scala:151:14] output [19:0] io_tag, // @[NBDcache.scala:151:14] input io_mem_acquire_ready, // @[NBDcache.scala:151:14] output io_mem_acquire_valid, // @[NBDcache.scala:151:14] output [2:0] io_mem_acquire_bits_param, // @[NBDcache.scala:151:14] output [31:0] io_mem_acquire_bits_address, // @[NBDcache.scala:151:14] input io_mem_grant_valid, // @[NBDcache.scala:151:14] input [2:0] io_mem_grant_bits_opcode, // @[NBDcache.scala:151:14] input [1:0] io_mem_grant_bits_param, // @[NBDcache.scala:151:14] input [3:0] io_mem_grant_bits_size, // @[NBDcache.scala:151:14] input [1:0] io_mem_grant_bits_source, // @[NBDcache.scala:151:14] input [2:0] io_mem_grant_bits_sink, // @[NBDcache.scala:151:14] input io_mem_grant_bits_denied, // @[NBDcache.scala:151:14] input [63:0] io_mem_grant_bits_data, // @[NBDcache.scala:151:14] input io_mem_grant_bits_corrupt, // @[NBDcache.scala:151:14] input io_mem_finish_ready, // @[NBDcache.scala:151:14] output io_mem_finish_valid, // @[NBDcache.scala:151:14] output [2:0] io_mem_finish_bits_sink, // @[NBDcache.scala:151:14] output [7:0] io_refill_way_en, // @[NBDcache.scala:151:14] output [11:0] io_refill_addr, // @[NBDcache.scala:151:14] input io_meta_read_ready, // @[NBDcache.scala:151:14] output io_meta_read_valid, // @[NBDcache.scala:151:14] output [5:0] io_meta_read_bits_idx, // @[NBDcache.scala:151:14] output [19:0] io_meta_read_bits_tag, // @[NBDcache.scala:151:14] input io_meta_write_ready, // @[NBDcache.scala:151:14] output io_meta_write_valid, // @[NBDcache.scala:151:14] output [5:0] io_meta_write_bits_idx, // @[NBDcache.scala:151:14] output [7:0] io_meta_write_bits_way_en, // @[NBDcache.scala:151:14] output [19:0] io_meta_write_bits_tag, // @[NBDcache.scala:151:14] output [1:0] io_meta_write_bits_data_coh_state, // @[NBDcache.scala:151:14] output [19:0] io_meta_write_bits_data_tag, // @[NBDcache.scala:151:14] input io_replay_ready, // @[NBDcache.scala:151:14] output io_replay_valid, // @[NBDcache.scala:151:14] output [39:0] io_replay_bits_addr, // @[NBDcache.scala:151:14] output [6:0] io_replay_bits_tag, // @[NBDcache.scala:151:14] output [4:0] io_replay_bits_cmd, // @[NBDcache.scala:151:14] output [1:0] io_replay_bits_size, // @[NBDcache.scala:151:14] output io_replay_bits_signed, // 
@[NBDcache.scala:151:14] output [1:0] io_replay_bits_dprv, // @[NBDcache.scala:151:14] output io_replay_bits_dv, // @[NBDcache.scala:151:14] output io_replay_bits_no_resp, // @[NBDcache.scala:151:14] output io_replay_bits_no_alloc, // @[NBDcache.scala:151:14] output io_replay_bits_no_xcpt, // @[NBDcache.scala:151:14] output [4:0] io_replay_bits_sdq_id, // @[NBDcache.scala:151:14] input io_wb_req_ready, // @[NBDcache.scala:151:14] output io_wb_req_valid, // @[NBDcache.scala:151:14] output [19:0] io_wb_req_bits_tag, // @[NBDcache.scala:151:14] output [5:0] io_wb_req_bits_idx, // @[NBDcache.scala:151:14] output [2:0] io_wb_req_bits_param, // @[NBDcache.scala:151:14] output [7:0] io_wb_req_bits_way_en, // @[NBDcache.scala:151:14] output io_probe_rdy // @[NBDcache.scala:151:14] ); wire [19:0] io_tag_0; // @[NBDcache.scala:150:7] wire _grantackq_io_enq_ready; // @[NBDcache.scala:263:25] wire _grantackq_io_deq_valid; // @[NBDcache.scala:263:25] wire _rpq_io_enq_ready; // @[NBDcache.scala:200:19] wire _rpq_io_deq_valid; // @[NBDcache.scala:200:19] wire [39:0] _rpq_io_deq_bits_addr; // @[NBDcache.scala:200:19] wire [4:0] _rpq_io_deq_bits_cmd; // @[NBDcache.scala:200:19] wire io_req_pri_val_0 = io_req_pri_val; // @[NBDcache.scala:150:7] wire io_req_sec_val_0 = io_req_sec_val; // @[NBDcache.scala:150:7] wire [39:0] io_req_bits_addr_0 = io_req_bits_addr; // @[NBDcache.scala:150:7] wire [6:0] io_req_bits_tag_0 = io_req_bits_tag; // @[NBDcache.scala:150:7] wire [4:0] io_req_bits_cmd_0 = io_req_bits_cmd; // @[NBDcache.scala:150:7] wire [1:0] io_req_bits_size_0 = io_req_bits_size; // @[NBDcache.scala:150:7] wire io_req_bits_signed_0 = io_req_bits_signed; // @[NBDcache.scala:150:7] wire [1:0] io_req_bits_dprv_0 = io_req_bits_dprv; // @[NBDcache.scala:150:7] wire io_req_bits_dv_0 = io_req_bits_dv; // @[NBDcache.scala:150:7] wire io_req_bits_phys_0 = io_req_bits_phys; // @[NBDcache.scala:150:7] wire io_req_bits_no_resp_0 = io_req_bits_no_resp; // @[NBDcache.scala:150:7] wire io_req_bits_no_alloc_0 = io_req_bits_no_alloc; // @[NBDcache.scala:150:7] wire io_req_bits_no_xcpt_0 = io_req_bits_no_xcpt; // @[NBDcache.scala:150:7] wire [4:0] io_req_bits_sdq_id_0 = io_req_bits_sdq_id; // @[NBDcache.scala:150:7] wire io_req_bits_tag_match_0 = io_req_bits_tag_match; // @[NBDcache.scala:150:7] wire [1:0] io_req_bits_old_meta_coh_state_0 = io_req_bits_old_meta_coh_state; // @[NBDcache.scala:150:7] wire [19:0] io_req_bits_old_meta_tag_0 = io_req_bits_old_meta_tag; // @[NBDcache.scala:150:7] wire [7:0] io_req_bits_way_en_0 = io_req_bits_way_en; // @[NBDcache.scala:150:7] wire io_mem_acquire_ready_0 = io_mem_acquire_ready; // @[NBDcache.scala:150:7] wire io_mem_grant_valid_0 = io_mem_grant_valid; // @[NBDcache.scala:150:7] wire [2:0] io_mem_grant_bits_opcode_0 = io_mem_grant_bits_opcode; // @[NBDcache.scala:150:7] wire [1:0] io_mem_grant_bits_param_0 = io_mem_grant_bits_param; // @[NBDcache.scala:150:7] wire [3:0] io_mem_grant_bits_size_0 = io_mem_grant_bits_size; // @[NBDcache.scala:150:7] wire [1:0] io_mem_grant_bits_source_0 = io_mem_grant_bits_source; // @[NBDcache.scala:150:7] wire [2:0] io_mem_grant_bits_sink_0 = io_mem_grant_bits_sink; // @[NBDcache.scala:150:7] wire io_mem_grant_bits_denied_0 = io_mem_grant_bits_denied; // @[NBDcache.scala:150:7] wire [63:0] io_mem_grant_bits_data_0 = io_mem_grant_bits_data; // @[NBDcache.scala:150:7] wire io_mem_grant_bits_corrupt_0 = io_mem_grant_bits_corrupt; // @[NBDcache.scala:150:7] wire io_mem_finish_ready_0 = io_mem_finish_ready; // @[NBDcache.scala:150:7] wire 
io_meta_read_ready_0 = io_meta_read_ready; // @[NBDcache.scala:150:7] wire io_meta_write_ready_0 = io_meta_write_ready; // @[NBDcache.scala:150:7] wire io_replay_ready_0 = io_replay_ready; // @[NBDcache.scala:150:7] wire io_wb_req_ready_0 = io_wb_req_ready; // @[NBDcache.scala:150:7] wire [3:0] _r_T_15 = 4'hB; // @[Metadata.scala:130:10] wire [3:0] _needs_wb_r_T_15 = 4'hB; // @[Metadata.scala:130:10] wire [3:0] _r_T_16 = 4'hA; // @[Metadata.scala:131:10] wire [3:0] _needs_wb_r_T_16 = 4'hA; // @[Metadata.scala:131:10] wire [3:0] _r_T_17 = 4'h9; // @[Metadata.scala:132:10] wire [3:0] _needs_wb_r_T_17 = 4'h9; // @[Metadata.scala:132:10] wire [3:0] _r_T_18 = 4'h8; // @[Metadata.scala:133:10] wire [3:0] _needs_wb_r_T_18 = 4'h8; // @[Metadata.scala:133:10] wire [3:0] _r_T_7 = 4'h3; // @[Metadata.scala:122:10] wire [3:0] _grow_param_r_T_2 = 4'h3; // @[Metadata.scala:60:10] wire [3:0] _r1_T_2 = 4'h3; // @[Metadata.scala:60:10] wire [3:0] _r2_T_2 = 4'h3; // @[Metadata.scala:60:10] wire [3:0] _needs_wb_r_T_7 = 4'h3; // @[Metadata.scala:122:10] wire [3:0] _r_T_66 = 4'h3; // @[Metadata.scala:60:10] wire [3:0] _r_T_8 = 4'h2; // @[Metadata.scala:123:10] wire [3:0] _grow_param_r_T_4 = 4'h2; // @[Metadata.scala:61:10] wire [3:0] _r1_T_4 = 4'h2; // @[Metadata.scala:61:10] wire [3:0] _r2_T_4 = 4'h2; // @[Metadata.scala:61:10] wire [3:0] _needs_wb_r_T_8 = 4'h2; // @[Metadata.scala:123:10] wire [3:0] _r_T_68 = 4'h2; // @[Metadata.scala:61:10] wire [3:0] _r_T_9 = 4'h1; // @[Metadata.scala:124:10] wire [3:0] _grow_param_r_T_6 = 4'h1; // @[Metadata.scala:62:10] wire [3:0] _coh_on_grant_T_2 = 4'h1; // @[Metadata.scala:86:10] wire [3:0] _r1_T_6 = 4'h1; // @[Metadata.scala:62:10] wire [3:0] _r2_T_6 = 4'h1; // @[Metadata.scala:62:10] wire [3:0] _needs_wb_r_T_9 = 4'h1; // @[Metadata.scala:124:10] wire [3:0] _r_T_70 = 4'h1; // @[Metadata.scala:62:10] wire [3:0] _r_T_11 = 4'h7; // @[Metadata.scala:126:10] wire [3:0] _grow_param_r_T_8 = 4'h7; // @[Metadata.scala:63:10] wire [3:0] _r1_T_8 = 4'h7; // @[Metadata.scala:63:10] wire [3:0] _r2_T_8 = 4'h7; // @[Metadata.scala:63:10] wire [3:0] _needs_wb_r_T_11 = 4'h7; // @[Metadata.scala:126:10] wire [3:0] _r_T_72 = 4'h7; // @[Metadata.scala:63:10] wire [3:0] _grow_param_r_T_14 = 4'hE; // @[Metadata.scala:66:10] wire [3:0] _r1_T_14 = 4'hE; // @[Metadata.scala:66:10] wire [3:0] _r2_T_14 = 4'hE; // @[Metadata.scala:66:10] wire [3:0] _r_T_78 = 4'hE; // @[Metadata.scala:66:10] wire [3:0] _r_T_10 = 4'h0; // @[Metadata.scala:125:10] wire [3:0] _grow_param_r_T_16 = 4'h0; // @[Metadata.scala:68:10] wire [3:0] _coh_on_grant_T_4 = 4'h0; // @[Metadata.scala:87:10] wire [3:0] _r1_T_16 = 4'h0; // @[Metadata.scala:68:10] wire [3:0] _r2_T_16 = 4'h0; // @[Metadata.scala:68:10] wire [3:0] _needs_wb_r_T_10 = 4'h0; // @[Metadata.scala:125:10] wire [3:0] _r_T_80 = 4'h0; // @[Metadata.scala:68:10] wire [3:0] _r_T_13 = 4'h5; // @[Metadata.scala:128:10] wire [3:0] _grow_param_r_T_18 = 4'h5; // @[Metadata.scala:69:10] wire [3:0] _r1_T_18 = 4'h5; // @[Metadata.scala:69:10] wire [3:0] _r2_T_18 = 4'h5; // @[Metadata.scala:69:10] wire [3:0] _needs_wb_r_T_13 = 4'h5; // @[Metadata.scala:128:10] wire [3:0] _r_T_82 = 4'h5; // @[Metadata.scala:69:10] wire [3:0] _grow_param_r_T_22 = 4'hD; // @[Metadata.scala:71:10] wire [3:0] _r1_T_22 = 4'hD; // @[Metadata.scala:71:10] wire [3:0] _r2_T_22 = 4'hD; // @[Metadata.scala:71:10] wire [3:0] _r_T_86 = 4'hD; // @[Metadata.scala:71:10] wire [3:0] _grow_param_r_T_24 = 4'hC; // @[Metadata.scala:72:10] wire [3:0] _coh_on_grant_T_8 = 4'hC; // @[Metadata.scala:89:10] wire 
[3:0] _r1_T_24 = 4'hC; // @[Metadata.scala:72:10] wire [3:0] _r2_T_24 = 4'hC; // @[Metadata.scala:72:10] wire [3:0] _r_T_88 = 4'hC; // @[Metadata.scala:72:10] wire [1:0] new_coh_meta_state = 2'h0; // @[Metadata.scala:160:20] wire [1:0] _r_T_22 = 2'h0; // @[Misc.scala:38:63] wire [1:0] _r_T_26 = 2'h0; // @[Misc.scala:38:63] wire [1:0] _r_T_30 = 2'h0; // @[Misc.scala:38:63] wire [1:0] _r_T_34 = 2'h0; // @[Misc.scala:38:63] wire [1:0] _r_T_38 = 2'h0; // @[Misc.scala:38:63] wire [1:0] _grow_param_r_T_1 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _grow_param_r_T_3 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _grow_param_r_T_5 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _grow_param_r_T_15 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _coh_on_grant_T_1 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _coh_on_grant_T_3 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _r1_T_1 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _r1_T_3 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _r1_T_5 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _r1_T_15 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _r2_T_1 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _r2_T_3 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _r2_T_5 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _r2_T_15 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _needs_wb_r_T_22 = 2'h0; // @[Misc.scala:38:63] wire [1:0] _needs_wb_r_T_26 = 2'h0; // @[Misc.scala:38:63] wire [1:0] _needs_wb_r_T_30 = 2'h0; // @[Misc.scala:38:63] wire [1:0] _needs_wb_r_T_34 = 2'h0; // @[Misc.scala:38:63] wire [1:0] _needs_wb_r_T_38 = 2'h0; // @[Misc.scala:38:63] wire [1:0] _r_T_65 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _r_T_67 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _r_T_69 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _r_T_79 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] new_coh_meta_1_state = 2'h0; // @[Metadata.scala:160:20] wire [1:0] _r_T_1 = 2'h2; // @[Metadata.scala:140:24] wire [1:0] _r_T_3 = 2'h2; // @[Metadata.scala:140:24] wire [1:0] _r_T_5 = 2'h2; // @[Metadata.scala:140:24] wire [1:0] _needs_wb_r_T_1 = 2'h2; // @[Metadata.scala:140:24] wire [1:0] _needs_wb_r_T_3 = 2'h2; // @[Metadata.scala:140:24] wire [1:0] _needs_wb_r_T_5 = 2'h2; // @[Metadata.scala:140:24] wire [1:0] io_mem_acquire_bits_a_mask_sizeOH_shiftAmount = 2'h2; // @[OneHot.scala:64:49] wire [3:0] _r_T_14 = 4'h4; // @[Metadata.scala:129:10] wire [3:0] _grow_param_r_T_20 = 4'h4; // @[Metadata.scala:70:10] wire [3:0] _coh_on_grant_T_6 = 4'h4; // @[Metadata.scala:88:10] wire [3:0] _r1_T_20 = 4'h4; // @[Metadata.scala:70:10] wire [3:0] _r2_T_20 = 4'h4; // @[Metadata.scala:70:10] wire [3:0] _needs_wb_r_T_14 = 4'h4; // @[Metadata.scala:129:10] wire [3:0] _r_T_84 = 4'h4; // @[Metadata.scala:70:10] wire [3:0] _io_mem_acquire_bits_a_mask_sizeOH_T_1 = 4'h4; // @[OneHot.scala:65:12] wire [2:0] _io_mem_acquire_bits_a_mask_sizeOH_T_2 = 3'h4; // @[OneHot.scala:65:27] wire [2:0] io_mem_acquire_bits_a_mask_sizeOH = 3'h5; // @[Misc.scala:202:81] wire [1:0] _grow_param_r_T_11 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _grow_param_r_T_13 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _grow_param_r_T_21 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _grow_param_r_T_23 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _coh_on_grant_T_7 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _r1_T_11 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _r1_T_13 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _r1_T_21 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _r1_T_23 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _r2_T_11 = 2'h3; // 
@[Metadata.scala:24:15] wire [1:0] _r2_T_13 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _r2_T_21 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _r2_T_23 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _dirties_T = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _r_T_75 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _r_T_77 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _r_T_85 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _r_T_87 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] io_mem_acquire_bits_a_mask_lo_lo = 2'h3; // @[Misc.scala:222:10] wire [1:0] io_mem_acquire_bits_a_mask_lo_hi = 2'h3; // @[Misc.scala:222:10] wire [1:0] io_mem_acquire_bits_a_mask_hi_lo = 2'h3; // @[Misc.scala:222:10] wire [1:0] io_mem_acquire_bits_a_mask_hi_hi = 2'h3; // @[Misc.scala:222:10] wire [3:0] _grow_param_r_T_12 = 4'hF; // @[Metadata.scala:65:10] wire [3:0] _r1_T_12 = 4'hF; // @[Metadata.scala:65:10] wire [3:0] _r2_T_12 = 4'hF; // @[Metadata.scala:65:10] wire [3:0] _r_T_76 = 4'hF; // @[Metadata.scala:65:10] wire [3:0] io_mem_acquire_bits_a_mask_lo = 4'hF; // @[Misc.scala:222:10] wire [3:0] io_mem_acquire_bits_a_mask_hi = 4'hF; // @[Misc.scala:222:10] wire io_replay_bits_phys = 1'h1; // @[NBDcache.scala:150:7] wire io_wb_req_bits_voluntary = 1'h1; // @[NBDcache.scala:150:7] wire _r_T = 1'h1; // @[Metadata.scala:140:24] wire _needs_wb_r_T = 1'h1; // @[Metadata.scala:140:24] wire _io_mem_acquire_bits_legal_T_19 = 1'h1; // @[Parameters.scala:91:44] wire _io_mem_acquire_bits_legal_T_20 = 1'h1; // @[Parameters.scala:684:29] wire io_mem_acquire_bits_a_mask_sub_sub_sub_0_1 = 1'h1; // @[Misc.scala:206:21] wire io_mem_acquire_bits_a_mask_sub_sub_size = 1'h1; // @[Misc.scala:209:26] wire io_mem_acquire_bits_a_mask_sub_sub_0_1 = 1'h1; // @[Misc.scala:215:29] wire io_mem_acquire_bits_a_mask_sub_sub_1_1 = 1'h1; // @[Misc.scala:215:29] wire io_mem_acquire_bits_a_mask_sub_0_1 = 1'h1; // @[Misc.scala:215:29] wire io_mem_acquire_bits_a_mask_sub_1_1 = 1'h1; // @[Misc.scala:215:29] wire io_mem_acquire_bits_a_mask_sub_2_1 = 1'h1; // @[Misc.scala:215:29] wire io_mem_acquire_bits_a_mask_sub_3_1 = 1'h1; // @[Misc.scala:215:29] wire io_mem_acquire_bits_a_mask_size = 1'h1; // @[Misc.scala:209:26] wire io_mem_acquire_bits_a_mask_acc = 1'h1; // @[Misc.scala:215:29] wire io_mem_acquire_bits_a_mask_acc_1 = 1'h1; // @[Misc.scala:215:29] wire io_mem_acquire_bits_a_mask_acc_2 = 1'h1; // @[Misc.scala:215:29] wire io_mem_acquire_bits_a_mask_acc_3 = 1'h1; // @[Misc.scala:215:29] wire io_mem_acquire_bits_a_mask_acc_4 = 1'h1; // @[Misc.scala:215:29] wire io_mem_acquire_bits_a_mask_acc_5 = 1'h1; // @[Misc.scala:215:29] wire io_mem_acquire_bits_a_mask_acc_6 = 1'h1; // @[Misc.scala:215:29] wire io_mem_acquire_bits_a_mask_acc_7 = 1'h1; // @[Misc.scala:215:29] wire io_mem_acquire_bits_corrupt = 1'h0; // @[NBDcache.scala:150:7] wire _r_T_2 = 1'h0; // @[Metadata.scala:140:24] wire _r_T_4 = 1'h0; // @[Metadata.scala:140:24] wire _r_T_20 = 1'h0; // @[Misc.scala:38:9] wire _r_T_24 = 1'h0; // @[Misc.scala:38:9] wire _r_T_28 = 1'h0; // @[Misc.scala:38:9] wire _grow_param_r_T_26 = 1'h0; // @[Misc.scala:35:9] wire _grow_param_r_T_29 = 1'h0; // @[Misc.scala:35:9] wire _grow_param_r_T_32 = 1'h0; // @[Misc.scala:35:9] wire _grow_param_r_T_35 = 1'h0; // @[Misc.scala:35:9] wire _grow_param_r_T_38 = 1'h0; // @[Misc.scala:35:9] wire _r1_T_26 = 1'h0; // @[Misc.scala:35:9] wire _r1_T_29 = 1'h0; // @[Misc.scala:35:9] wire _r1_T_32 = 1'h0; // @[Misc.scala:35:9] wire _r1_T_35 = 1'h0; // @[Misc.scala:35:9] wire _r1_T_38 = 1'h0; // @[Misc.scala:35:9] wire _r2_T_26 = 1'h0; // 
@[Misc.scala:35:9] wire _r2_T_29 = 1'h0; // @[Misc.scala:35:9] wire _r2_T_32 = 1'h0; // @[Misc.scala:35:9] wire _r2_T_35 = 1'h0; // @[Misc.scala:35:9] wire _r2_T_38 = 1'h0; // @[Misc.scala:35:9] wire _needs_wb_r_T_2 = 1'h0; // @[Metadata.scala:140:24] wire _needs_wb_r_T_4 = 1'h0; // @[Metadata.scala:140:24] wire _needs_wb_r_T_20 = 1'h0; // @[Misc.scala:38:9] wire _needs_wb_r_T_24 = 1'h0; // @[Misc.scala:38:9] wire _needs_wb_r_T_28 = 1'h0; // @[Misc.scala:38:9] wire _r_T_90 = 1'h0; // @[Misc.scala:35:9] wire _r_T_93 = 1'h0; // @[Misc.scala:35:9] wire _r_T_96 = 1'h0; // @[Misc.scala:35:9] wire _r_T_99 = 1'h0; // @[Misc.scala:35:9] wire _r_T_102 = 1'h0; // @[Misc.scala:35:9] wire _io_mem_acquire_bits_legal_T = 1'h0; // @[Parameters.scala:684:29] wire _io_mem_acquire_bits_legal_T_18 = 1'h0; // @[Parameters.scala:684:54] wire _io_mem_acquire_bits_legal_T_33 = 1'h0; // @[Parameters.scala:686:26] wire io_mem_acquire_bits_a_corrupt = 1'h0; // @[Edges.scala:346:17] wire io_mem_acquire_bits_a_mask_sub_size = 1'h0; // @[Misc.scala:209:26] wire _io_mem_acquire_bits_a_mask_sub_acc_T = 1'h0; // @[Misc.scala:215:38] wire _io_mem_acquire_bits_a_mask_sub_acc_T_1 = 1'h0; // @[Misc.scala:215:38] wire _io_mem_acquire_bits_a_mask_sub_acc_T_2 = 1'h0; // @[Misc.scala:215:38] wire _io_mem_acquire_bits_a_mask_sub_acc_T_3 = 1'h0; // @[Misc.scala:215:38] wire [63:0] io_mem_acquire_bits_data = 64'h0; // @[NBDcache.scala:150:7] wire [63:0] io_mem_acquire_bits_a_data = 64'h0; // @[Edges.scala:346:17] wire [7:0] io_mem_acquire_bits_mask = 8'hFF; // @[NBDcache.scala:150:7] wire [7:0] io_meta_read_bits_way_en = 8'hFF; // @[NBDcache.scala:150:7] wire [7:0] io_mem_acquire_bits_a_mask = 8'hFF; // @[Edges.scala:346:17] wire [7:0] _io_mem_acquire_bits_a_mask_T = 8'hFF; // @[Misc.scala:222:10] wire [7:0] _io_meta_read_bits_way_en_T = 8'hFF; // @[NBDcache.scala:308:31] wire [1:0] io_mem_acquire_bits_source = 2'h1; // @[NBDcache.scala:150:7] wire [1:0] io_wb_req_bits_source = 2'h1; // @[NBDcache.scala:150:7] wire [1:0] _grow_param_r_T_7 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _grow_param_r_T_9 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _grow_param_r_T_17 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _grow_param_r_T_19 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _coh_on_grant_T_5 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _r1_T_7 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _r1_T_9 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _r1_T_17 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _r1_T_19 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _r2_T_7 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _r2_T_9 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _r2_T_17 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _r2_T_19 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _r_T_71 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _r_T_73 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _r_T_81 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _r_T_83 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] io_mem_acquire_bits_a_source = 2'h1; // @[Edges.scala:346:17] wire [3:0] io_mem_acquire_bits_size = 4'h6; // @[NBDcache.scala:150:7] wire [3:0] _r_T_12 = 4'h6; // @[Metadata.scala:127:10] wire [3:0] _grow_param_r_T_10 = 4'h6; // @[Metadata.scala:64:10] wire [3:0] _r1_T_10 = 4'h6; // @[Metadata.scala:64:10] wire [3:0] _r2_T_10 = 4'h6; // @[Metadata.scala:64:10] wire [3:0] _needs_wb_r_T_12 = 4'h6; // @[Metadata.scala:127:10] wire [3:0] _r_T_74 = 4'h6; // @[Metadata.scala:64:10] wire [3:0] io_mem_acquire_bits_a_size = 4'h6; // @[Edges.scala:346:17] wire [2:0] 
io_mem_acquire_bits_opcode = 3'h6; // @[NBDcache.scala:150:7] wire [2:0] io_mem_acquire_bits_a_opcode = 3'h6; // @[Edges.scala:346:17] wire [2:0] _io_mem_acquire_bits_a_mask_sizeOH_T = 3'h6; // @[Misc.scala:202:34] wire _io_req_pri_rdy_T; // @[NBDcache.scala:275:27] wire _io_req_sec_rdy_T; // @[NBDcache.scala:276:29] wire _io_idx_match_T_1; // @[NBDcache.scala:271:41] wire [19:0] io_meta_read_bits_tag_0 = io_tag_0; // @[NBDcache.scala:150:7] wire [19:0] io_meta_write_bits_tag_0 = io_tag_0; // @[NBDcache.scala:150:7] wire [19:0] io_meta_write_bits_data_tag_0 = io_tag_0; // @[NBDcache.scala:150:7] wire _io_mem_acquire_valid_T_1; // @[NBDcache.scala:298:50] wire [2:0] io_mem_acquire_bits_a_param; // @[Edges.scala:346:17] wire [31:0] io_mem_acquire_bits_a_address; // @[Edges.scala:346:17] wire [2:0] grantackq_io_enq_bits_e_sink = io_mem_grant_bits_sink_0; // @[Edges.scala:451:17] wire _io_mem_finish_valid_T; // @[NBDcache.scala:267:49] wire _io_meta_read_valid_T; // @[NBDcache.scala:305:31] wire [5:0] req_idx; // @[NBDcache.scala:177:25] wire _io_meta_write_valid_T_2; // @[package.scala:81:59] wire [1:0] _io_meta_write_bits_data_coh_T_1_state; // @[NBDcache.scala:286:37] wire _io_replay_valid_T_1; // @[NBDcache.scala:310:44] wire _io_wb_req_valid_T; // @[NBDcache.scala:290:28] wire [2:0] shrink_param; // @[Misc.scala:38:36] wire _io_probe_rdy_T_9; // @[NBDcache.scala:281:30] wire [2:0] io_mem_acquire_bits_param_0; // @[NBDcache.scala:150:7] wire [31:0] io_mem_acquire_bits_address_0; // @[NBDcache.scala:150:7] wire io_mem_acquire_valid_0; // @[NBDcache.scala:150:7] wire [2:0] io_mem_finish_bits_sink_0; // @[NBDcache.scala:150:7] wire io_mem_finish_valid_0; // @[NBDcache.scala:150:7] wire [7:0] io_refill_way_en_0; // @[NBDcache.scala:150:7] wire [11:0] io_refill_addr_0; // @[NBDcache.scala:150:7] wire [5:0] io_meta_read_bits_idx_0; // @[NBDcache.scala:150:7] wire io_meta_read_valid_0; // @[NBDcache.scala:150:7] wire [1:0] io_meta_write_bits_data_coh_state_0; // @[NBDcache.scala:150:7] wire [5:0] io_meta_write_bits_idx_0; // @[NBDcache.scala:150:7] wire [7:0] io_meta_write_bits_way_en_0; // @[NBDcache.scala:150:7] wire io_meta_write_valid_0; // @[NBDcache.scala:150:7] wire [39:0] io_replay_bits_addr_0; // @[NBDcache.scala:150:7] wire [6:0] io_replay_bits_tag_0; // @[NBDcache.scala:150:7] wire [4:0] io_replay_bits_cmd_0; // @[NBDcache.scala:150:7] wire [1:0] io_replay_bits_size_0; // @[NBDcache.scala:150:7] wire io_replay_bits_signed_0; // @[NBDcache.scala:150:7] wire [1:0] io_replay_bits_dprv_0; // @[NBDcache.scala:150:7] wire io_replay_bits_dv_0; // @[NBDcache.scala:150:7] wire io_replay_bits_no_resp_0; // @[NBDcache.scala:150:7] wire io_replay_bits_no_alloc_0; // @[NBDcache.scala:150:7] wire io_replay_bits_no_xcpt_0; // @[NBDcache.scala:150:7] wire [4:0] io_replay_bits_sdq_id_0; // @[NBDcache.scala:150:7] wire io_replay_valid_0; // @[NBDcache.scala:150:7] wire [19:0] io_wb_req_bits_tag_0; // @[NBDcache.scala:150:7] wire [5:0] io_wb_req_bits_idx_0; // @[NBDcache.scala:150:7] wire [2:0] io_wb_req_bits_param_0; // @[NBDcache.scala:150:7] wire [7:0] io_wb_req_bits_way_en_0; // @[NBDcache.scala:150:7] wire io_wb_req_valid_0; // @[NBDcache.scala:150:7] wire io_req_pri_rdy_0; // @[NBDcache.scala:150:7] wire io_req_sec_rdy_0; // @[NBDcache.scala:150:7] wire io_idx_match_0; // @[NBDcache.scala:150:7] wire io_probe_rdy_0; // @[NBDcache.scala:150:7] reg [3:0] state; // @[NBDcache.scala:174:22] reg [39:0] req_addr; // @[NBDcache.scala:176:16] reg [6:0] req_tag; // @[NBDcache.scala:176:16] reg [4:0] 
req_cmd; // @[NBDcache.scala:176:16] reg [1:0] req_size; // @[NBDcache.scala:176:16] reg req_signed; // @[NBDcache.scala:176:16] reg [1:0] req_dprv; // @[NBDcache.scala:176:16] reg req_dv; // @[NBDcache.scala:176:16] reg req_phys; // @[NBDcache.scala:176:16] reg req_no_resp; // @[NBDcache.scala:176:16] reg req_no_alloc; // @[NBDcache.scala:176:16] reg req_no_xcpt; // @[NBDcache.scala:176:16] reg [4:0] req_sdq_id; // @[NBDcache.scala:176:16] reg req_tag_match; // @[NBDcache.scala:176:16] reg [1:0] req_old_meta_coh_state; // @[NBDcache.scala:176:16] reg [19:0] req_old_meta_tag; // @[NBDcache.scala:176:16] assign io_wb_req_bits_tag_0 = req_old_meta_tag; // @[NBDcache.scala:150:7, :176:16] reg [7:0] req_way_en; // @[NBDcache.scala:176:16] assign io_refill_way_en_0 = req_way_en; // @[NBDcache.scala:150:7, :176:16] assign io_meta_write_bits_way_en_0 = req_way_en; // @[NBDcache.scala:150:7, :176:16] assign io_wb_req_bits_way_en_0 = req_way_en; // @[NBDcache.scala:150:7, :176:16] assign req_idx = req_addr[11:6]; // @[NBDcache.scala:176:16, :177:25] assign io_meta_read_bits_idx_0 = req_idx; // @[NBDcache.scala:150:7, :177:25] assign io_meta_write_bits_idx_0 = req_idx; // @[NBDcache.scala:150:7, :177:25] assign io_wb_req_bits_idx_0 = req_idx; // @[NBDcache.scala:150:7, :177:25] wire [27:0] req_tag_0 = req_addr[39:12]; // @[NBDcache.scala:176:16, :178:26] wire [33:0] _req_block_addr_T = req_addr[39:6]; // @[NBDcache.scala:176:16, :179:34] wire [39:0] req_block_addr = {_req_block_addr_T, 6'h0}; // @[NBDcache.scala:179:{34,51}] wire [5:0] _idx_match_T = io_req_bits_addr_0[11:6]; // @[NBDcache.scala:150:7, :180:47] wire idx_match = req_idx == _idx_match_T; // @[NBDcache.scala:177:25, :180:{27,47}] reg [1:0] new_coh_state; // @[NBDcache.scala:182:24] wire [3:0] _r_T_6 = {2'h2, req_old_meta_coh_state}; // @[Metadata.scala:120:19] wire _r_T_19 = _r_T_6 == 4'h8; // @[Misc.scala:56:20] wire [2:0] _r_T_21 = _r_T_19 ? 3'h5 : 3'h0; // @[Misc.scala:38:36, :56:20] wire _r_T_23 = _r_T_6 == 4'h9; // @[Misc.scala:56:20] wire [2:0] _r_T_25 = _r_T_23 ? 3'h2 : _r_T_21; // @[Misc.scala:38:36, :56:20] wire _r_T_27 = _r_T_6 == 4'hA; // @[Misc.scala:56:20] wire [2:0] _r_T_29 = _r_T_27 ? 3'h1 : _r_T_25; // @[Misc.scala:38:36, :56:20] wire _r_T_31 = _r_T_6 == 4'hB; // @[Misc.scala:56:20] wire _r_T_32 = _r_T_31; // @[Misc.scala:38:9, :56:20] wire [2:0] _r_T_33 = _r_T_31 ? 3'h1 : _r_T_29; // @[Misc.scala:38:36, :56:20] wire _r_T_35 = _r_T_6 == 4'h4; // @[Misc.scala:56:20] wire _r_T_36 = ~_r_T_35 & _r_T_32; // @[Misc.scala:38:9, :56:20] wire [2:0] _r_T_37 = _r_T_35 ? 3'h5 : _r_T_33; // @[Misc.scala:38:36, :56:20] wire _r_T_39 = _r_T_6 == 4'h5; // @[Misc.scala:56:20] wire _r_T_40 = ~_r_T_39 & _r_T_36; // @[Misc.scala:38:9, :56:20] wire [2:0] _r_T_41 = _r_T_39 ? 3'h4 : _r_T_37; // @[Misc.scala:38:36, :56:20] wire [1:0] _r_T_42 = {1'h0, _r_T_39}; // @[Misc.scala:38:63, :56:20] wire _r_T_43 = _r_T_6 == 4'h6; // @[Misc.scala:56:20] wire _r_T_44 = ~_r_T_43 & _r_T_40; // @[Misc.scala:38:9, :56:20] wire [2:0] _r_T_45 = _r_T_43 ? 3'h0 : _r_T_41; // @[Misc.scala:38:36, :56:20] wire [1:0] _r_T_46 = _r_T_43 ? 2'h1 : _r_T_42; // @[Misc.scala:38:63, :56:20] wire _r_T_47 = _r_T_6 == 4'h7; // @[Misc.scala:56:20] wire _r_T_48 = _r_T_47 | _r_T_44; // @[Misc.scala:38:9, :56:20] wire [2:0] _r_T_49 = _r_T_47 ? 3'h0 : _r_T_45; // @[Misc.scala:38:36, :56:20] wire [1:0] _r_T_50 = _r_T_47 ? 
2'h1 : _r_T_46; // @[Misc.scala:38:63, :56:20] wire _r_T_51 = _r_T_6 == 4'h0; // @[Misc.scala:56:20] wire _r_T_52 = ~_r_T_51 & _r_T_48; // @[Misc.scala:38:9, :56:20] wire [2:0] _r_T_53 = _r_T_51 ? 3'h5 : _r_T_49; // @[Misc.scala:38:36, :56:20] wire [1:0] _r_T_54 = _r_T_51 ? 2'h0 : _r_T_50; // @[Misc.scala:38:63, :56:20] wire _r_T_55 = _r_T_6 == 4'h1; // @[Misc.scala:56:20] wire _r_T_56 = ~_r_T_55 & _r_T_52; // @[Misc.scala:38:9, :56:20] wire [2:0] _r_T_57 = _r_T_55 ? 3'h4 : _r_T_53; // @[Misc.scala:38:36, :56:20] wire [1:0] _r_T_58 = _r_T_55 ? 2'h1 : _r_T_54; // @[Misc.scala:38:63, :56:20] wire _r_T_59 = _r_T_6 == 4'h2; // @[Misc.scala:56:20] wire _r_T_60 = ~_r_T_59 & _r_T_56; // @[Misc.scala:38:9, :56:20] wire [2:0] _r_T_61 = _r_T_59 ? 3'h3 : _r_T_57; // @[Misc.scala:38:36, :56:20] wire [1:0] _r_T_62 = _r_T_59 ? 2'h2 : _r_T_58; // @[Misc.scala:38:63, :56:20] wire _r_T_63 = _r_T_6 == 4'h3; // @[Misc.scala:56:20] wire r_1 = _r_T_63 | _r_T_60; // @[Misc.scala:38:9, :56:20] assign shrink_param = _r_T_63 ? 3'h3 : _r_T_61; // @[Misc.scala:38:36, :56:20] assign io_wb_req_bits_param_0 = shrink_param; // @[Misc.scala:38:36] wire [1:0] r_3 = _r_T_63 ? 2'h2 : _r_T_62; // @[Misc.scala:38:63, :56:20] wire [1:0] coh_on_clear_state = r_3; // @[Misc.scala:38:63] wire _GEN = req_cmd == 5'h1; // @[NBDcache.scala:176:16] wire _grow_param_r_c_cat_T; // @[Consts.scala:90:32] assign _grow_param_r_c_cat_T = _GEN; // @[Consts.scala:90:32] wire _grow_param_r_c_cat_T_23; // @[Consts.scala:90:32] assign _grow_param_r_c_cat_T_23 = _GEN; // @[Consts.scala:90:32] wire _coh_on_grant_c_cat_T; // @[Consts.scala:90:32] assign _coh_on_grant_c_cat_T = _GEN; // @[Consts.scala:90:32] wire _coh_on_grant_c_cat_T_23; // @[Consts.scala:90:32] assign _coh_on_grant_c_cat_T_23 = _GEN; // @[Consts.scala:90:32] wire _r1_c_cat_T; // @[Consts.scala:90:32] assign _r1_c_cat_T = _GEN; // @[Consts.scala:90:32] wire _r1_c_cat_T_23; // @[Consts.scala:90:32] assign _r1_c_cat_T_23 = _GEN; // @[Consts.scala:90:32] wire _needs_second_acq_T_27; // @[Consts.scala:90:32] assign _needs_second_acq_T_27 = _GEN; // @[Consts.scala:90:32] wire _GEN_0 = req_cmd == 5'h11; // @[NBDcache.scala:176:16] wire _grow_param_r_c_cat_T_1; // @[Consts.scala:90:49] assign _grow_param_r_c_cat_T_1 = _GEN_0; // @[Consts.scala:90:49] wire _grow_param_r_c_cat_T_24; // @[Consts.scala:90:49] assign _grow_param_r_c_cat_T_24 = _GEN_0; // @[Consts.scala:90:49] wire _coh_on_grant_c_cat_T_1; // @[Consts.scala:90:49] assign _coh_on_grant_c_cat_T_1 = _GEN_0; // @[Consts.scala:90:49] wire _coh_on_grant_c_cat_T_24; // @[Consts.scala:90:49] assign _coh_on_grant_c_cat_T_24 = _GEN_0; // @[Consts.scala:90:49] wire _r1_c_cat_T_1; // @[Consts.scala:90:49] assign _r1_c_cat_T_1 = _GEN_0; // @[Consts.scala:90:49] wire _r1_c_cat_T_24; // @[Consts.scala:90:49] assign _r1_c_cat_T_24 = _GEN_0; // @[Consts.scala:90:49] wire _needs_second_acq_T_28; // @[Consts.scala:90:49] assign _needs_second_acq_T_28 = _GEN_0; // @[Consts.scala:90:49] wire _grow_param_r_c_cat_T_2 = _grow_param_r_c_cat_T | _grow_param_r_c_cat_T_1; // @[Consts.scala:90:{32,42,49}] wire _GEN_1 = req_cmd == 5'h7; // @[NBDcache.scala:176:16] wire _grow_param_r_c_cat_T_3; // @[Consts.scala:90:66] assign _grow_param_r_c_cat_T_3 = _GEN_1; // @[Consts.scala:90:66] wire _grow_param_r_c_cat_T_26; // @[Consts.scala:90:66] assign _grow_param_r_c_cat_T_26 = _GEN_1; // @[Consts.scala:90:66] wire _coh_on_grant_c_cat_T_3; // @[Consts.scala:90:66] assign _coh_on_grant_c_cat_T_3 = _GEN_1; // @[Consts.scala:90:66] wire _coh_on_grant_c_cat_T_26; // 
@[Consts.scala:90:66] assign _coh_on_grant_c_cat_T_26 = _GEN_1; // @[Consts.scala:90:66] wire _r1_c_cat_T_3; // @[Consts.scala:90:66] assign _r1_c_cat_T_3 = _GEN_1; // @[Consts.scala:90:66] wire _r1_c_cat_T_26; // @[Consts.scala:90:66] assign _r1_c_cat_T_26 = _GEN_1; // @[Consts.scala:90:66] wire _needs_second_acq_T_30; // @[Consts.scala:90:66] assign _needs_second_acq_T_30 = _GEN_1; // @[Consts.scala:90:66] wire _grow_param_r_c_cat_T_4 = _grow_param_r_c_cat_T_2 | _grow_param_r_c_cat_T_3; // @[Consts.scala:90:{42,59,66}] wire _GEN_2 = req_cmd == 5'h4; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_5; // @[package.scala:16:47] assign _grow_param_r_c_cat_T_5 = _GEN_2; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_28; // @[package.scala:16:47] assign _grow_param_r_c_cat_T_28 = _GEN_2; // @[package.scala:16:47] wire _coh_on_grant_c_cat_T_5; // @[package.scala:16:47] assign _coh_on_grant_c_cat_T_5 = _GEN_2; // @[package.scala:16:47] wire _coh_on_grant_c_cat_T_28; // @[package.scala:16:47] assign _coh_on_grant_c_cat_T_28 = _GEN_2; // @[package.scala:16:47] wire _r1_c_cat_T_5; // @[package.scala:16:47] assign _r1_c_cat_T_5 = _GEN_2; // @[package.scala:16:47] wire _r1_c_cat_T_28; // @[package.scala:16:47] assign _r1_c_cat_T_28 = _GEN_2; // @[package.scala:16:47] wire _needs_second_acq_T_32; // @[package.scala:16:47] assign _needs_second_acq_T_32 = _GEN_2; // @[package.scala:16:47] wire _GEN_3 = req_cmd == 5'h9; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_6; // @[package.scala:16:47] assign _grow_param_r_c_cat_T_6 = _GEN_3; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_29; // @[package.scala:16:47] assign _grow_param_r_c_cat_T_29 = _GEN_3; // @[package.scala:16:47] wire _coh_on_grant_c_cat_T_6; // @[package.scala:16:47] assign _coh_on_grant_c_cat_T_6 = _GEN_3; // @[package.scala:16:47] wire _coh_on_grant_c_cat_T_29; // @[package.scala:16:47] assign _coh_on_grant_c_cat_T_29 = _GEN_3; // @[package.scala:16:47] wire _r1_c_cat_T_6; // @[package.scala:16:47] assign _r1_c_cat_T_6 = _GEN_3; // @[package.scala:16:47] wire _r1_c_cat_T_29; // @[package.scala:16:47] assign _r1_c_cat_T_29 = _GEN_3; // @[package.scala:16:47] wire _needs_second_acq_T_33; // @[package.scala:16:47] assign _needs_second_acq_T_33 = _GEN_3; // @[package.scala:16:47] wire _GEN_4 = req_cmd == 5'hA; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_7; // @[package.scala:16:47] assign _grow_param_r_c_cat_T_7 = _GEN_4; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_30; // @[package.scala:16:47] assign _grow_param_r_c_cat_T_30 = _GEN_4; // @[package.scala:16:47] wire _coh_on_grant_c_cat_T_7; // @[package.scala:16:47] assign _coh_on_grant_c_cat_T_7 = _GEN_4; // @[package.scala:16:47] wire _coh_on_grant_c_cat_T_30; // @[package.scala:16:47] assign _coh_on_grant_c_cat_T_30 = _GEN_4; // @[package.scala:16:47] wire _r1_c_cat_T_7; // @[package.scala:16:47] assign _r1_c_cat_T_7 = _GEN_4; // @[package.scala:16:47] wire _r1_c_cat_T_30; // @[package.scala:16:47] assign _r1_c_cat_T_30 = _GEN_4; // @[package.scala:16:47] wire _needs_second_acq_T_34; // @[package.scala:16:47] assign _needs_second_acq_T_34 = _GEN_4; // @[package.scala:16:47] wire _GEN_5 = req_cmd == 5'hB; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_8; // @[package.scala:16:47] assign _grow_param_r_c_cat_T_8 = _GEN_5; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_31; // @[package.scala:16:47] assign _grow_param_r_c_cat_T_31 = _GEN_5; // @[package.scala:16:47] wire _coh_on_grant_c_cat_T_8; // @[package.scala:16:47] assign 
_coh_on_grant_c_cat_T_8 = _GEN_5; // @[package.scala:16:47] wire _coh_on_grant_c_cat_T_31; // @[package.scala:16:47] assign _coh_on_grant_c_cat_T_31 = _GEN_5; // @[package.scala:16:47] wire _r1_c_cat_T_8; // @[package.scala:16:47] assign _r1_c_cat_T_8 = _GEN_5; // @[package.scala:16:47] wire _r1_c_cat_T_31; // @[package.scala:16:47] assign _r1_c_cat_T_31 = _GEN_5; // @[package.scala:16:47] wire _needs_second_acq_T_35; // @[package.scala:16:47] assign _needs_second_acq_T_35 = _GEN_5; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_9 = _grow_param_r_c_cat_T_5 | _grow_param_r_c_cat_T_6; // @[package.scala:16:47, :81:59] wire _grow_param_r_c_cat_T_10 = _grow_param_r_c_cat_T_9 | _grow_param_r_c_cat_T_7; // @[package.scala:16:47, :81:59] wire _grow_param_r_c_cat_T_11 = _grow_param_r_c_cat_T_10 | _grow_param_r_c_cat_T_8; // @[package.scala:16:47, :81:59] wire _GEN_6 = req_cmd == 5'h8; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_12; // @[package.scala:16:47] assign _grow_param_r_c_cat_T_12 = _GEN_6; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_35; // @[package.scala:16:47] assign _grow_param_r_c_cat_T_35 = _GEN_6; // @[package.scala:16:47] wire _coh_on_grant_c_cat_T_12; // @[package.scala:16:47] assign _coh_on_grant_c_cat_T_12 = _GEN_6; // @[package.scala:16:47] wire _coh_on_grant_c_cat_T_35; // @[package.scala:16:47] assign _coh_on_grant_c_cat_T_35 = _GEN_6; // @[package.scala:16:47] wire _r1_c_cat_T_12; // @[package.scala:16:47] assign _r1_c_cat_T_12 = _GEN_6; // @[package.scala:16:47] wire _r1_c_cat_T_35; // @[package.scala:16:47] assign _r1_c_cat_T_35 = _GEN_6; // @[package.scala:16:47] wire _needs_second_acq_T_39; // @[package.scala:16:47] assign _needs_second_acq_T_39 = _GEN_6; // @[package.scala:16:47] wire _GEN_7 = req_cmd == 5'hC; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_13; // @[package.scala:16:47] assign _grow_param_r_c_cat_T_13 = _GEN_7; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_36; // @[package.scala:16:47] assign _grow_param_r_c_cat_T_36 = _GEN_7; // @[package.scala:16:47] wire _coh_on_grant_c_cat_T_13; // @[package.scala:16:47] assign _coh_on_grant_c_cat_T_13 = _GEN_7; // @[package.scala:16:47] wire _coh_on_grant_c_cat_T_36; // @[package.scala:16:47] assign _coh_on_grant_c_cat_T_36 = _GEN_7; // @[package.scala:16:47] wire _r1_c_cat_T_13; // @[package.scala:16:47] assign _r1_c_cat_T_13 = _GEN_7; // @[package.scala:16:47] wire _r1_c_cat_T_36; // @[package.scala:16:47] assign _r1_c_cat_T_36 = _GEN_7; // @[package.scala:16:47] wire _needs_second_acq_T_40; // @[package.scala:16:47] assign _needs_second_acq_T_40 = _GEN_7; // @[package.scala:16:47] wire _GEN_8 = req_cmd == 5'hD; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_14; // @[package.scala:16:47] assign _grow_param_r_c_cat_T_14 = _GEN_8; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_37; // @[package.scala:16:47] assign _grow_param_r_c_cat_T_37 = _GEN_8; // @[package.scala:16:47] wire _coh_on_grant_c_cat_T_14; // @[package.scala:16:47] assign _coh_on_grant_c_cat_T_14 = _GEN_8; // @[package.scala:16:47] wire _coh_on_grant_c_cat_T_37; // @[package.scala:16:47] assign _coh_on_grant_c_cat_T_37 = _GEN_8; // @[package.scala:16:47] wire _r1_c_cat_T_14; // @[package.scala:16:47] assign _r1_c_cat_T_14 = _GEN_8; // @[package.scala:16:47] wire _r1_c_cat_T_37; // @[package.scala:16:47] assign _r1_c_cat_T_37 = _GEN_8; // @[package.scala:16:47] wire _needs_second_acq_T_41; // @[package.scala:16:47] assign _needs_second_acq_T_41 = _GEN_8; // @[package.scala:16:47] wire _GEN_9 = req_cmd == 
5'hE; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_15; // @[package.scala:16:47] assign _grow_param_r_c_cat_T_15 = _GEN_9; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_38; // @[package.scala:16:47] assign _grow_param_r_c_cat_T_38 = _GEN_9; // @[package.scala:16:47] wire _coh_on_grant_c_cat_T_15; // @[package.scala:16:47] assign _coh_on_grant_c_cat_T_15 = _GEN_9; // @[package.scala:16:47] wire _coh_on_grant_c_cat_T_38; // @[package.scala:16:47] assign _coh_on_grant_c_cat_T_38 = _GEN_9; // @[package.scala:16:47] wire _r1_c_cat_T_15; // @[package.scala:16:47] assign _r1_c_cat_T_15 = _GEN_9; // @[package.scala:16:47] wire _r1_c_cat_T_38; // @[package.scala:16:47] assign _r1_c_cat_T_38 = _GEN_9; // @[package.scala:16:47] wire _needs_second_acq_T_42; // @[package.scala:16:47] assign _needs_second_acq_T_42 = _GEN_9; // @[package.scala:16:47] wire _GEN_10 = req_cmd == 5'hF; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_16; // @[package.scala:16:47] assign _grow_param_r_c_cat_T_16 = _GEN_10; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_39; // @[package.scala:16:47] assign _grow_param_r_c_cat_T_39 = _GEN_10; // @[package.scala:16:47] wire _coh_on_grant_c_cat_T_16; // @[package.scala:16:47] assign _coh_on_grant_c_cat_T_16 = _GEN_10; // @[package.scala:16:47] wire _coh_on_grant_c_cat_T_39; // @[package.scala:16:47] assign _coh_on_grant_c_cat_T_39 = _GEN_10; // @[package.scala:16:47] wire _r1_c_cat_T_16; // @[package.scala:16:47] assign _r1_c_cat_T_16 = _GEN_10; // @[package.scala:16:47] wire _r1_c_cat_T_39; // @[package.scala:16:47] assign _r1_c_cat_T_39 = _GEN_10; // @[package.scala:16:47] wire _needs_second_acq_T_43; // @[package.scala:16:47] assign _needs_second_acq_T_43 = _GEN_10; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_17 = _grow_param_r_c_cat_T_12 | _grow_param_r_c_cat_T_13; // @[package.scala:16:47, :81:59] wire _grow_param_r_c_cat_T_18 = _grow_param_r_c_cat_T_17 | _grow_param_r_c_cat_T_14; // @[package.scala:16:47, :81:59] wire _grow_param_r_c_cat_T_19 = _grow_param_r_c_cat_T_18 | _grow_param_r_c_cat_T_15; // @[package.scala:16:47, :81:59] wire _grow_param_r_c_cat_T_20 = _grow_param_r_c_cat_T_19 | _grow_param_r_c_cat_T_16; // @[package.scala:16:47, :81:59] wire _grow_param_r_c_cat_T_21 = _grow_param_r_c_cat_T_11 | _grow_param_r_c_cat_T_20; // @[package.scala:81:59] wire _grow_param_r_c_cat_T_22 = _grow_param_r_c_cat_T_4 | _grow_param_r_c_cat_T_21; // @[Consts.scala:87:44, :90:{59,76}] wire _grow_param_r_c_cat_T_25 = _grow_param_r_c_cat_T_23 | _grow_param_r_c_cat_T_24; // @[Consts.scala:90:{32,42,49}] wire _grow_param_r_c_cat_T_27 = _grow_param_r_c_cat_T_25 | _grow_param_r_c_cat_T_26; // @[Consts.scala:90:{42,59,66}] wire _grow_param_r_c_cat_T_32 = _grow_param_r_c_cat_T_28 | _grow_param_r_c_cat_T_29; // @[package.scala:16:47, :81:59] wire _grow_param_r_c_cat_T_33 = _grow_param_r_c_cat_T_32 | _grow_param_r_c_cat_T_30; // @[package.scala:16:47, :81:59] wire _grow_param_r_c_cat_T_34 = _grow_param_r_c_cat_T_33 | _grow_param_r_c_cat_T_31; // @[package.scala:16:47, :81:59] wire _grow_param_r_c_cat_T_40 = _grow_param_r_c_cat_T_35 | _grow_param_r_c_cat_T_36; // @[package.scala:16:47, :81:59] wire _grow_param_r_c_cat_T_41 = _grow_param_r_c_cat_T_40 | _grow_param_r_c_cat_T_37; // @[package.scala:16:47, :81:59] wire _grow_param_r_c_cat_T_42 = _grow_param_r_c_cat_T_41 | _grow_param_r_c_cat_T_38; // @[package.scala:16:47, :81:59] wire _grow_param_r_c_cat_T_43 = _grow_param_r_c_cat_T_42 | _grow_param_r_c_cat_T_39; // @[package.scala:16:47, :81:59] wire 
_grow_param_r_c_cat_T_44 = _grow_param_r_c_cat_T_34 | _grow_param_r_c_cat_T_43; // @[package.scala:81:59] wire _grow_param_r_c_cat_T_45 = _grow_param_r_c_cat_T_27 | _grow_param_r_c_cat_T_44; // @[Consts.scala:87:44, :90:{59,76}] wire _GEN_11 = req_cmd == 5'h3; // @[NBDcache.scala:176:16] wire _grow_param_r_c_cat_T_46; // @[Consts.scala:91:54] assign _grow_param_r_c_cat_T_46 = _GEN_11; // @[Consts.scala:91:54] wire _coh_on_grant_c_cat_T_46; // @[Consts.scala:91:54] assign _coh_on_grant_c_cat_T_46 = _GEN_11; // @[Consts.scala:91:54] wire _r1_c_cat_T_46; // @[Consts.scala:91:54] assign _r1_c_cat_T_46 = _GEN_11; // @[Consts.scala:91:54] wire _needs_second_acq_T_50; // @[Consts.scala:91:54] assign _needs_second_acq_T_50 = _GEN_11; // @[Consts.scala:91:54] wire _grow_param_r_c_cat_T_47 = _grow_param_r_c_cat_T_45 | _grow_param_r_c_cat_T_46; // @[Consts.scala:90:76, :91:{47,54}] wire _GEN_12 = req_cmd == 5'h6; // @[NBDcache.scala:176:16] wire _grow_param_r_c_cat_T_48; // @[Consts.scala:91:71] assign _grow_param_r_c_cat_T_48 = _GEN_12; // @[Consts.scala:91:71] wire _coh_on_grant_c_cat_T_48; // @[Consts.scala:91:71] assign _coh_on_grant_c_cat_T_48 = _GEN_12; // @[Consts.scala:91:71] wire _r1_c_cat_T_48; // @[Consts.scala:91:71] assign _r1_c_cat_T_48 = _GEN_12; // @[Consts.scala:91:71] wire _needs_second_acq_T_52; // @[Consts.scala:91:71] assign _needs_second_acq_T_52 = _GEN_12; // @[Consts.scala:91:71] wire _grow_param_r_c_cat_T_49 = _grow_param_r_c_cat_T_47 | _grow_param_r_c_cat_T_48; // @[Consts.scala:91:{47,64,71}] wire [1:0] grow_param_r_c = {_grow_param_r_c_cat_T_22, _grow_param_r_c_cat_T_49}; // @[Metadata.scala:29:18] wire [3:0] _grow_param_r_T = {grow_param_r_c, new_coh_state}; // @[Metadata.scala:29:18, :58:19] wire _grow_param_r_T_25 = _grow_param_r_T == 4'hC; // @[Misc.scala:49:20] wire [1:0] _grow_param_r_T_27 = {1'h0, _grow_param_r_T_25}; // @[Misc.scala:35:36, :49:20] wire _grow_param_r_T_28 = _grow_param_r_T == 4'hD; // @[Misc.scala:49:20] wire [1:0] _grow_param_r_T_30 = _grow_param_r_T_28 ? 2'h2 : _grow_param_r_T_27; // @[Misc.scala:35:36, :49:20] wire _grow_param_r_T_31 = _grow_param_r_T == 4'h4; // @[Misc.scala:49:20] wire [1:0] _grow_param_r_T_33 = _grow_param_r_T_31 ? 2'h1 : _grow_param_r_T_30; // @[Misc.scala:35:36, :49:20] wire _grow_param_r_T_34 = _grow_param_r_T == 4'h5; // @[Misc.scala:49:20] wire [1:0] _grow_param_r_T_36 = _grow_param_r_T_34 ? 2'h2 : _grow_param_r_T_33; // @[Misc.scala:35:36, :49:20] wire _grow_param_r_T_37 = _grow_param_r_T == 4'h0; // @[Misc.scala:49:20] wire [1:0] _grow_param_r_T_39 = _grow_param_r_T_37 ? 2'h0 : _grow_param_r_T_36; // @[Misc.scala:35:36, :49:20] wire _grow_param_r_T_40 = _grow_param_r_T == 4'hE; // @[Misc.scala:49:20] wire _grow_param_r_T_41 = _grow_param_r_T_40; // @[Misc.scala:35:9, :49:20] wire [1:0] _grow_param_r_T_42 = _grow_param_r_T_40 ? 2'h3 : _grow_param_r_T_39; // @[Misc.scala:35:36, :49:20] wire _grow_param_r_T_43 = &_grow_param_r_T; // @[Misc.scala:49:20] wire _grow_param_r_T_44 = _grow_param_r_T_43 | _grow_param_r_T_41; // @[Misc.scala:35:9, :49:20] wire [1:0] _grow_param_r_T_45 = _grow_param_r_T_43 ? 2'h3 : _grow_param_r_T_42; // @[Misc.scala:35:36, :49:20] wire _grow_param_r_T_46 = _grow_param_r_T == 4'h6; // @[Misc.scala:49:20] wire _grow_param_r_T_47 = _grow_param_r_T_46 | _grow_param_r_T_44; // @[Misc.scala:35:9, :49:20] wire [1:0] _grow_param_r_T_48 = _grow_param_r_T_46 ? 
2'h2 : _grow_param_r_T_45; // @[Misc.scala:35:36, :49:20] wire _grow_param_r_T_49 = _grow_param_r_T == 4'h7; // @[Misc.scala:49:20] wire _grow_param_r_T_50 = _grow_param_r_T_49 | _grow_param_r_T_47; // @[Misc.scala:35:9, :49:20] wire [1:0] _grow_param_r_T_51 = _grow_param_r_T_49 ? 2'h3 : _grow_param_r_T_48; // @[Misc.scala:35:36, :49:20] wire _grow_param_r_T_52 = _grow_param_r_T == 4'h1; // @[Misc.scala:49:20] wire _grow_param_r_T_53 = _grow_param_r_T_52 | _grow_param_r_T_50; // @[Misc.scala:35:9, :49:20] wire [1:0] _grow_param_r_T_54 = _grow_param_r_T_52 ? 2'h1 : _grow_param_r_T_51; // @[Misc.scala:35:36, :49:20] wire _grow_param_r_T_55 = _grow_param_r_T == 4'h2; // @[Misc.scala:49:20] wire _grow_param_r_T_56 = _grow_param_r_T_55 | _grow_param_r_T_53; // @[Misc.scala:35:9, :49:20] wire [1:0] _grow_param_r_T_57 = _grow_param_r_T_55 ? 2'h2 : _grow_param_r_T_54; // @[Misc.scala:35:36, :49:20] wire _grow_param_r_T_58 = _grow_param_r_T == 4'h3; // @[Misc.scala:49:20] wire grow_param_r_1 = _grow_param_r_T_58 | _grow_param_r_T_56; // @[Misc.scala:35:9, :49:20] wire [1:0] grow_param = _grow_param_r_T_58 ? 2'h3 : _grow_param_r_T_57; // @[Misc.scala:35:36, :49:20] wire [1:0] grow_param_meta_state = grow_param; // @[Misc.scala:35:36] wire _coh_on_grant_c_cat_T_2 = _coh_on_grant_c_cat_T | _coh_on_grant_c_cat_T_1; // @[Consts.scala:90:{32,42,49}] wire _coh_on_grant_c_cat_T_4 = _coh_on_grant_c_cat_T_2 | _coh_on_grant_c_cat_T_3; // @[Consts.scala:90:{42,59,66}] wire _coh_on_grant_c_cat_T_9 = _coh_on_grant_c_cat_T_5 | _coh_on_grant_c_cat_T_6; // @[package.scala:16:47, :81:59] wire _coh_on_grant_c_cat_T_10 = _coh_on_grant_c_cat_T_9 | _coh_on_grant_c_cat_T_7; // @[package.scala:16:47, :81:59] wire _coh_on_grant_c_cat_T_11 = _coh_on_grant_c_cat_T_10 | _coh_on_grant_c_cat_T_8; // @[package.scala:16:47, :81:59] wire _coh_on_grant_c_cat_T_17 = _coh_on_grant_c_cat_T_12 | _coh_on_grant_c_cat_T_13; // @[package.scala:16:47, :81:59] wire _coh_on_grant_c_cat_T_18 = _coh_on_grant_c_cat_T_17 | _coh_on_grant_c_cat_T_14; // @[package.scala:16:47, :81:59] wire _coh_on_grant_c_cat_T_19 = _coh_on_grant_c_cat_T_18 | _coh_on_grant_c_cat_T_15; // @[package.scala:16:47, :81:59] wire _coh_on_grant_c_cat_T_20 = _coh_on_grant_c_cat_T_19 | _coh_on_grant_c_cat_T_16; // @[package.scala:16:47, :81:59] wire _coh_on_grant_c_cat_T_21 = _coh_on_grant_c_cat_T_11 | _coh_on_grant_c_cat_T_20; // @[package.scala:81:59] wire _coh_on_grant_c_cat_T_22 = _coh_on_grant_c_cat_T_4 | _coh_on_grant_c_cat_T_21; // @[Consts.scala:87:44, :90:{59,76}] wire _coh_on_grant_c_cat_T_25 = _coh_on_grant_c_cat_T_23 | _coh_on_grant_c_cat_T_24; // @[Consts.scala:90:{32,42,49}] wire _coh_on_grant_c_cat_T_27 = _coh_on_grant_c_cat_T_25 | _coh_on_grant_c_cat_T_26; // @[Consts.scala:90:{42,59,66}] wire _coh_on_grant_c_cat_T_32 = _coh_on_grant_c_cat_T_28 | _coh_on_grant_c_cat_T_29; // @[package.scala:16:47, :81:59] wire _coh_on_grant_c_cat_T_33 = _coh_on_grant_c_cat_T_32 | _coh_on_grant_c_cat_T_30; // @[package.scala:16:47, :81:59] wire _coh_on_grant_c_cat_T_34 = _coh_on_grant_c_cat_T_33 | _coh_on_grant_c_cat_T_31; // @[package.scala:16:47, :81:59] wire _coh_on_grant_c_cat_T_40 = _coh_on_grant_c_cat_T_35 | _coh_on_grant_c_cat_T_36; // @[package.scala:16:47, :81:59] wire _coh_on_grant_c_cat_T_41 = _coh_on_grant_c_cat_T_40 | _coh_on_grant_c_cat_T_37; // @[package.scala:16:47, :81:59] wire _coh_on_grant_c_cat_T_42 = _coh_on_grant_c_cat_T_41 | _coh_on_grant_c_cat_T_38; // @[package.scala:16:47, :81:59] wire _coh_on_grant_c_cat_T_43 = _coh_on_grant_c_cat_T_42 | 
_coh_on_grant_c_cat_T_39; // @[package.scala:16:47, :81:59] wire _coh_on_grant_c_cat_T_44 = _coh_on_grant_c_cat_T_34 | _coh_on_grant_c_cat_T_43; // @[package.scala:81:59] wire _coh_on_grant_c_cat_T_45 = _coh_on_grant_c_cat_T_27 | _coh_on_grant_c_cat_T_44; // @[Consts.scala:87:44, :90:{59,76}] wire _coh_on_grant_c_cat_T_47 = _coh_on_grant_c_cat_T_45 | _coh_on_grant_c_cat_T_46; // @[Consts.scala:90:76, :91:{47,54}] wire _coh_on_grant_c_cat_T_49 = _coh_on_grant_c_cat_T_47 | _coh_on_grant_c_cat_T_48; // @[Consts.scala:91:{47,64,71}] wire [1:0] coh_on_grant_c = {_coh_on_grant_c_cat_T_22, _coh_on_grant_c_cat_T_49}; // @[Metadata.scala:29:18] wire [3:0] _coh_on_grant_T = {coh_on_grant_c, io_mem_grant_bits_param_0}; // @[Metadata.scala:29:18, :84:18] wire _coh_on_grant_T_9 = _coh_on_grant_T == 4'h1; // @[Metadata.scala:84:{18,38}] wire [1:0] _coh_on_grant_T_10 = {1'h0, _coh_on_grant_T_9}; // @[Metadata.scala:84:38] wire _coh_on_grant_T_11 = _coh_on_grant_T == 4'h0; // @[Metadata.scala:84:{18,38}] wire [1:0] _coh_on_grant_T_12 = _coh_on_grant_T_11 ? 2'h2 : _coh_on_grant_T_10; // @[Metadata.scala:84:38] wire _coh_on_grant_T_13 = _coh_on_grant_T == 4'h4; // @[Metadata.scala:84:{18,38}] wire [1:0] _coh_on_grant_T_14 = _coh_on_grant_T_13 ? 2'h2 : _coh_on_grant_T_12; // @[Metadata.scala:84:38] wire _coh_on_grant_T_15 = _coh_on_grant_T == 4'hC; // @[Metadata.scala:84:{18,38}] wire [1:0] _coh_on_grant_T_16 = _coh_on_grant_T_15 ? 2'h3 : _coh_on_grant_T_14; // @[Metadata.scala:84:38] wire [1:0] coh_on_grant_state = _coh_on_grant_T_16; // @[Metadata.scala:84:38, :160:20] wire _r1_c_cat_T_2 = _r1_c_cat_T | _r1_c_cat_T_1; // @[Consts.scala:90:{32,42,49}] wire _r1_c_cat_T_4 = _r1_c_cat_T_2 | _r1_c_cat_T_3; // @[Consts.scala:90:{42,59,66}] wire _r1_c_cat_T_9 = _r1_c_cat_T_5 | _r1_c_cat_T_6; // @[package.scala:16:47, :81:59] wire _r1_c_cat_T_10 = _r1_c_cat_T_9 | _r1_c_cat_T_7; // @[package.scala:16:47, :81:59] wire _r1_c_cat_T_11 = _r1_c_cat_T_10 | _r1_c_cat_T_8; // @[package.scala:16:47, :81:59] wire _r1_c_cat_T_17 = _r1_c_cat_T_12 | _r1_c_cat_T_13; // @[package.scala:16:47, :81:59] wire _r1_c_cat_T_18 = _r1_c_cat_T_17 | _r1_c_cat_T_14; // @[package.scala:16:47, :81:59] wire _r1_c_cat_T_19 = _r1_c_cat_T_18 | _r1_c_cat_T_15; // @[package.scala:16:47, :81:59] wire _r1_c_cat_T_20 = _r1_c_cat_T_19 | _r1_c_cat_T_16; // @[package.scala:16:47, :81:59] wire _r1_c_cat_T_21 = _r1_c_cat_T_11 | _r1_c_cat_T_20; // @[package.scala:81:59] wire _r1_c_cat_T_22 = _r1_c_cat_T_4 | _r1_c_cat_T_21; // @[Consts.scala:87:44, :90:{59,76}] wire _r1_c_cat_T_25 = _r1_c_cat_T_23 | _r1_c_cat_T_24; // @[Consts.scala:90:{32,42,49}] wire _r1_c_cat_T_27 = _r1_c_cat_T_25 | _r1_c_cat_T_26; // @[Consts.scala:90:{42,59,66}] wire _r1_c_cat_T_32 = _r1_c_cat_T_28 | _r1_c_cat_T_29; // @[package.scala:16:47, :81:59] wire _r1_c_cat_T_33 = _r1_c_cat_T_32 | _r1_c_cat_T_30; // @[package.scala:16:47, :81:59] wire _r1_c_cat_T_34 = _r1_c_cat_T_33 | _r1_c_cat_T_31; // @[package.scala:16:47, :81:59] wire _r1_c_cat_T_40 = _r1_c_cat_T_35 | _r1_c_cat_T_36; // @[package.scala:16:47, :81:59] wire _r1_c_cat_T_41 = _r1_c_cat_T_40 | _r1_c_cat_T_37; // @[package.scala:16:47, :81:59] wire _r1_c_cat_T_42 = _r1_c_cat_T_41 | _r1_c_cat_T_38; // @[package.scala:16:47, :81:59] wire _r1_c_cat_T_43 = _r1_c_cat_T_42 | _r1_c_cat_T_39; // @[package.scala:16:47, :81:59] wire _r1_c_cat_T_44 = _r1_c_cat_T_34 | _r1_c_cat_T_43; // @[package.scala:81:59] wire _r1_c_cat_T_45 = _r1_c_cat_T_27 | _r1_c_cat_T_44; // @[Consts.scala:87:44, :90:{59,76}] wire _r1_c_cat_T_47 = _r1_c_cat_T_45 | 
_r1_c_cat_T_46; // @[Consts.scala:90:76, :91:{47,54}] wire _r1_c_cat_T_49 = _r1_c_cat_T_47 | _r1_c_cat_T_48; // @[Consts.scala:91:{47,64,71}] wire [1:0] r1_c = {_r1_c_cat_T_22, _r1_c_cat_T_49}; // @[Metadata.scala:29:18] wire [3:0] _r1_T = {r1_c, new_coh_state}; // @[Metadata.scala:29:18, :58:19] wire _r1_T_25 = _r1_T == 4'hC; // @[Misc.scala:49:20] wire [1:0] _r1_T_27 = {1'h0, _r1_T_25}; // @[Misc.scala:35:36, :49:20] wire _r1_T_28 = _r1_T == 4'hD; // @[Misc.scala:49:20] wire [1:0] _r1_T_30 = _r1_T_28 ? 2'h2 : _r1_T_27; // @[Misc.scala:35:36, :49:20] wire _r1_T_31 = _r1_T == 4'h4; // @[Misc.scala:49:20] wire [1:0] _r1_T_33 = _r1_T_31 ? 2'h1 : _r1_T_30; // @[Misc.scala:35:36, :49:20] wire _r1_T_34 = _r1_T == 4'h5; // @[Misc.scala:49:20] wire [1:0] _r1_T_36 = _r1_T_34 ? 2'h2 : _r1_T_33; // @[Misc.scala:35:36, :49:20] wire _r1_T_37 = _r1_T == 4'h0; // @[Misc.scala:49:20] wire [1:0] _r1_T_39 = _r1_T_37 ? 2'h0 : _r1_T_36; // @[Misc.scala:35:36, :49:20] wire _r1_T_40 = _r1_T == 4'hE; // @[Misc.scala:49:20] wire _r1_T_41 = _r1_T_40; // @[Misc.scala:35:9, :49:20] wire [1:0] _r1_T_42 = _r1_T_40 ? 2'h3 : _r1_T_39; // @[Misc.scala:35:36, :49:20] wire _r1_T_43 = &_r1_T; // @[Misc.scala:49:20] wire _r1_T_44 = _r1_T_43 | _r1_T_41; // @[Misc.scala:35:9, :49:20] wire [1:0] _r1_T_45 = _r1_T_43 ? 2'h3 : _r1_T_42; // @[Misc.scala:35:36, :49:20] wire _r1_T_46 = _r1_T == 4'h6; // @[Misc.scala:49:20] wire _r1_T_47 = _r1_T_46 | _r1_T_44; // @[Misc.scala:35:9, :49:20] wire [1:0] _r1_T_48 = _r1_T_46 ? 2'h2 : _r1_T_45; // @[Misc.scala:35:36, :49:20] wire _r1_T_49 = _r1_T == 4'h7; // @[Misc.scala:49:20] wire _r1_T_50 = _r1_T_49 | _r1_T_47; // @[Misc.scala:35:9, :49:20] wire [1:0] _r1_T_51 = _r1_T_49 ? 2'h3 : _r1_T_48; // @[Misc.scala:35:36, :49:20] wire _r1_T_52 = _r1_T == 4'h1; // @[Misc.scala:49:20] wire _r1_T_53 = _r1_T_52 | _r1_T_50; // @[Misc.scala:35:9, :49:20] wire [1:0] _r1_T_54 = _r1_T_52 ? 2'h1 : _r1_T_51; // @[Misc.scala:35:36, :49:20] wire _r1_T_55 = _r1_T == 4'h2; // @[Misc.scala:49:20] wire _r1_T_56 = _r1_T_55 | _r1_T_53; // @[Misc.scala:35:9, :49:20] wire [1:0] _r1_T_57 = _r1_T_55 ? 2'h2 : _r1_T_54; // @[Misc.scala:35:36, :49:20] wire _r1_T_58 = _r1_T == 4'h3; // @[Misc.scala:49:20] wire r1_1 = _r1_T_58 | _r1_T_56; // @[Misc.scala:35:9, :49:20] wire [1:0] r1_2 = _r1_T_58 ? 
2'h3 : _r1_T_57; // @[Misc.scala:35:36, :49:20] wire _GEN_13 = io_req_bits_cmd_0 == 5'h1; // @[NBDcache.scala:150:7] wire _r2_c_cat_T; // @[Consts.scala:90:32] assign _r2_c_cat_T = _GEN_13; // @[Consts.scala:90:32] wire _r2_c_cat_T_23; // @[Consts.scala:90:32] assign _r2_c_cat_T_23 = _GEN_13; // @[Consts.scala:90:32] wire _needs_second_acq_T; // @[Consts.scala:90:32] assign _needs_second_acq_T = _GEN_13; // @[Consts.scala:90:32] wire _dirties_cat_T; // @[Consts.scala:90:32] assign _dirties_cat_T = _GEN_13; // @[Consts.scala:90:32] wire _dirties_cat_T_23; // @[Consts.scala:90:32] assign _dirties_cat_T_23 = _GEN_13; // @[Consts.scala:90:32] wire _r_c_cat_T; // @[Consts.scala:90:32] assign _r_c_cat_T = _GEN_13; // @[Consts.scala:90:32] wire _r_c_cat_T_23; // @[Consts.scala:90:32] assign _r_c_cat_T_23 = _GEN_13; // @[Consts.scala:90:32] wire _GEN_14 = io_req_bits_cmd_0 == 5'h11; // @[NBDcache.scala:150:7] wire _r2_c_cat_T_1; // @[Consts.scala:90:49] assign _r2_c_cat_T_1 = _GEN_14; // @[Consts.scala:90:49] wire _r2_c_cat_T_24; // @[Consts.scala:90:49] assign _r2_c_cat_T_24 = _GEN_14; // @[Consts.scala:90:49] wire _needs_second_acq_T_1; // @[Consts.scala:90:49] assign _needs_second_acq_T_1 = _GEN_14; // @[Consts.scala:90:49] wire _dirties_cat_T_1; // @[Consts.scala:90:49] assign _dirties_cat_T_1 = _GEN_14; // @[Consts.scala:90:49] wire _dirties_cat_T_24; // @[Consts.scala:90:49] assign _dirties_cat_T_24 = _GEN_14; // @[Consts.scala:90:49] wire _r_c_cat_T_1; // @[Consts.scala:90:49] assign _r_c_cat_T_1 = _GEN_14; // @[Consts.scala:90:49] wire _r_c_cat_T_24; // @[Consts.scala:90:49] assign _r_c_cat_T_24 = _GEN_14; // @[Consts.scala:90:49] wire _r2_c_cat_T_2 = _r2_c_cat_T | _r2_c_cat_T_1; // @[Consts.scala:90:{32,42,49}] wire _GEN_15 = io_req_bits_cmd_0 == 5'h7; // @[NBDcache.scala:150:7] wire _r2_c_cat_T_3; // @[Consts.scala:90:66] assign _r2_c_cat_T_3 = _GEN_15; // @[Consts.scala:90:66] wire _r2_c_cat_T_26; // @[Consts.scala:90:66] assign _r2_c_cat_T_26 = _GEN_15; // @[Consts.scala:90:66] wire _needs_second_acq_T_3; // @[Consts.scala:90:66] assign _needs_second_acq_T_3 = _GEN_15; // @[Consts.scala:90:66] wire _dirties_cat_T_3; // @[Consts.scala:90:66] assign _dirties_cat_T_3 = _GEN_15; // @[Consts.scala:90:66] wire _dirties_cat_T_26; // @[Consts.scala:90:66] assign _dirties_cat_T_26 = _GEN_15; // @[Consts.scala:90:66] wire _r_c_cat_T_3; // @[Consts.scala:90:66] assign _r_c_cat_T_3 = _GEN_15; // @[Consts.scala:90:66] wire _r_c_cat_T_26; // @[Consts.scala:90:66] assign _r_c_cat_T_26 = _GEN_15; // @[Consts.scala:90:66] wire _r2_c_cat_T_4 = _r2_c_cat_T_2 | _r2_c_cat_T_3; // @[Consts.scala:90:{42,59,66}] wire _GEN_16 = io_req_bits_cmd_0 == 5'h4; // @[package.scala:16:47] wire _r2_c_cat_T_5; // @[package.scala:16:47] assign _r2_c_cat_T_5 = _GEN_16; // @[package.scala:16:47] wire _r2_c_cat_T_28; // @[package.scala:16:47] assign _r2_c_cat_T_28 = _GEN_16; // @[package.scala:16:47] wire _needs_second_acq_T_5; // @[package.scala:16:47] assign _needs_second_acq_T_5 = _GEN_16; // @[package.scala:16:47] wire _dirties_cat_T_5; // @[package.scala:16:47] assign _dirties_cat_T_5 = _GEN_16; // @[package.scala:16:47] wire _dirties_cat_T_28; // @[package.scala:16:47] assign _dirties_cat_T_28 = _GEN_16; // @[package.scala:16:47] wire _r_c_cat_T_5; // @[package.scala:16:47] assign _r_c_cat_T_5 = _GEN_16; // @[package.scala:16:47] wire _r_c_cat_T_28; // @[package.scala:16:47] assign _r_c_cat_T_28 = _GEN_16; // @[package.scala:16:47] wire _GEN_17 = io_req_bits_cmd_0 == 5'h9; // @[package.scala:16:47] wire _r2_c_cat_T_6; 
// @[package.scala:16:47] assign _r2_c_cat_T_6 = _GEN_17; // @[package.scala:16:47] wire _r2_c_cat_T_29; // @[package.scala:16:47] assign _r2_c_cat_T_29 = _GEN_17; // @[package.scala:16:47] wire _needs_second_acq_T_6; // @[package.scala:16:47] assign _needs_second_acq_T_6 = _GEN_17; // @[package.scala:16:47] wire _dirties_cat_T_6; // @[package.scala:16:47] assign _dirties_cat_T_6 = _GEN_17; // @[package.scala:16:47] wire _dirties_cat_T_29; // @[package.scala:16:47] assign _dirties_cat_T_29 = _GEN_17; // @[package.scala:16:47] wire _r_c_cat_T_6; // @[package.scala:16:47] assign _r_c_cat_T_6 = _GEN_17; // @[package.scala:16:47] wire _r_c_cat_T_29; // @[package.scala:16:47] assign _r_c_cat_T_29 = _GEN_17; // @[package.scala:16:47] wire _GEN_18 = io_req_bits_cmd_0 == 5'hA; // @[package.scala:16:47] wire _r2_c_cat_T_7; // @[package.scala:16:47] assign _r2_c_cat_T_7 = _GEN_18; // @[package.scala:16:47] wire _r2_c_cat_T_30; // @[package.scala:16:47] assign _r2_c_cat_T_30 = _GEN_18; // @[package.scala:16:47] wire _needs_second_acq_T_7; // @[package.scala:16:47] assign _needs_second_acq_T_7 = _GEN_18; // @[package.scala:16:47] wire _dirties_cat_T_7; // @[package.scala:16:47] assign _dirties_cat_T_7 = _GEN_18; // @[package.scala:16:47] wire _dirties_cat_T_30; // @[package.scala:16:47] assign _dirties_cat_T_30 = _GEN_18; // @[package.scala:16:47] wire _r_c_cat_T_7; // @[package.scala:16:47] assign _r_c_cat_T_7 = _GEN_18; // @[package.scala:16:47] wire _r_c_cat_T_30; // @[package.scala:16:47] assign _r_c_cat_T_30 = _GEN_18; // @[package.scala:16:47] wire _GEN_19 = io_req_bits_cmd_0 == 5'hB; // @[package.scala:16:47] wire _r2_c_cat_T_8; // @[package.scala:16:47] assign _r2_c_cat_T_8 = _GEN_19; // @[package.scala:16:47] wire _r2_c_cat_T_31; // @[package.scala:16:47] assign _r2_c_cat_T_31 = _GEN_19; // @[package.scala:16:47] wire _needs_second_acq_T_8; // @[package.scala:16:47] assign _needs_second_acq_T_8 = _GEN_19; // @[package.scala:16:47] wire _dirties_cat_T_8; // @[package.scala:16:47] assign _dirties_cat_T_8 = _GEN_19; // @[package.scala:16:47] wire _dirties_cat_T_31; // @[package.scala:16:47] assign _dirties_cat_T_31 = _GEN_19; // @[package.scala:16:47] wire _r_c_cat_T_8; // @[package.scala:16:47] assign _r_c_cat_T_8 = _GEN_19; // @[package.scala:16:47] wire _r_c_cat_T_31; // @[package.scala:16:47] assign _r_c_cat_T_31 = _GEN_19; // @[package.scala:16:47] wire _r2_c_cat_T_9 = _r2_c_cat_T_5 | _r2_c_cat_T_6; // @[package.scala:16:47, :81:59] wire _r2_c_cat_T_10 = _r2_c_cat_T_9 | _r2_c_cat_T_7; // @[package.scala:16:47, :81:59] wire _r2_c_cat_T_11 = _r2_c_cat_T_10 | _r2_c_cat_T_8; // @[package.scala:16:47, :81:59] wire _GEN_20 = io_req_bits_cmd_0 == 5'h8; // @[package.scala:16:47] wire _r2_c_cat_T_12; // @[package.scala:16:47] assign _r2_c_cat_T_12 = _GEN_20; // @[package.scala:16:47] wire _r2_c_cat_T_35; // @[package.scala:16:47] assign _r2_c_cat_T_35 = _GEN_20; // @[package.scala:16:47] wire _needs_second_acq_T_12; // @[package.scala:16:47] assign _needs_second_acq_T_12 = _GEN_20; // @[package.scala:16:47] wire _dirties_cat_T_12; // @[package.scala:16:47] assign _dirties_cat_T_12 = _GEN_20; // @[package.scala:16:47] wire _dirties_cat_T_35; // @[package.scala:16:47] assign _dirties_cat_T_35 = _GEN_20; // @[package.scala:16:47] wire _r_c_cat_T_12; // @[package.scala:16:47] assign _r_c_cat_T_12 = _GEN_20; // @[package.scala:16:47] wire _r_c_cat_T_35; // @[package.scala:16:47] assign _r_c_cat_T_35 = _GEN_20; // @[package.scala:16:47] wire _GEN_21 = io_req_bits_cmd_0 == 5'hC; // @[package.scala:16:47] 
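  // Note (added comment, not generated output): the shared _GEN_* one-hot compares in this region
  // decode io_req_bits_cmd_0 against what appear to be the Rocket memory-op encodings (store,
  // partial write, store-conditional, the AMO group, and the write-intent ops); each result is
  // fanned out to the _r2_c_cat_*, _needs_second_acq_*, _dirties_cat_* and _r_c_cat_* category
  // terms computed below, which together decide whether a secondary miss needs a second acquire
  // and whether the incoming command dirties the line.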
wire _r2_c_cat_T_13; // @[package.scala:16:47] assign _r2_c_cat_T_13 = _GEN_21; // @[package.scala:16:47] wire _r2_c_cat_T_36; // @[package.scala:16:47] assign _r2_c_cat_T_36 = _GEN_21; // @[package.scala:16:47] wire _needs_second_acq_T_13; // @[package.scala:16:47] assign _needs_second_acq_T_13 = _GEN_21; // @[package.scala:16:47] wire _dirties_cat_T_13; // @[package.scala:16:47] assign _dirties_cat_T_13 = _GEN_21; // @[package.scala:16:47] wire _dirties_cat_T_36; // @[package.scala:16:47] assign _dirties_cat_T_36 = _GEN_21; // @[package.scala:16:47] wire _r_c_cat_T_13; // @[package.scala:16:47] assign _r_c_cat_T_13 = _GEN_21; // @[package.scala:16:47] wire _r_c_cat_T_36; // @[package.scala:16:47] assign _r_c_cat_T_36 = _GEN_21; // @[package.scala:16:47] wire _GEN_22 = io_req_bits_cmd_0 == 5'hD; // @[package.scala:16:47] wire _r2_c_cat_T_14; // @[package.scala:16:47] assign _r2_c_cat_T_14 = _GEN_22; // @[package.scala:16:47] wire _r2_c_cat_T_37; // @[package.scala:16:47] assign _r2_c_cat_T_37 = _GEN_22; // @[package.scala:16:47] wire _needs_second_acq_T_14; // @[package.scala:16:47] assign _needs_second_acq_T_14 = _GEN_22; // @[package.scala:16:47] wire _dirties_cat_T_14; // @[package.scala:16:47] assign _dirties_cat_T_14 = _GEN_22; // @[package.scala:16:47] wire _dirties_cat_T_37; // @[package.scala:16:47] assign _dirties_cat_T_37 = _GEN_22; // @[package.scala:16:47] wire _r_c_cat_T_14; // @[package.scala:16:47] assign _r_c_cat_T_14 = _GEN_22; // @[package.scala:16:47] wire _r_c_cat_T_37; // @[package.scala:16:47] assign _r_c_cat_T_37 = _GEN_22; // @[package.scala:16:47] wire _GEN_23 = io_req_bits_cmd_0 == 5'hE; // @[package.scala:16:47] wire _r2_c_cat_T_15; // @[package.scala:16:47] assign _r2_c_cat_T_15 = _GEN_23; // @[package.scala:16:47] wire _r2_c_cat_T_38; // @[package.scala:16:47] assign _r2_c_cat_T_38 = _GEN_23; // @[package.scala:16:47] wire _needs_second_acq_T_15; // @[package.scala:16:47] assign _needs_second_acq_T_15 = _GEN_23; // @[package.scala:16:47] wire _dirties_cat_T_15; // @[package.scala:16:47] assign _dirties_cat_T_15 = _GEN_23; // @[package.scala:16:47] wire _dirties_cat_T_38; // @[package.scala:16:47] assign _dirties_cat_T_38 = _GEN_23; // @[package.scala:16:47] wire _r_c_cat_T_15; // @[package.scala:16:47] assign _r_c_cat_T_15 = _GEN_23; // @[package.scala:16:47] wire _r_c_cat_T_38; // @[package.scala:16:47] assign _r_c_cat_T_38 = _GEN_23; // @[package.scala:16:47] wire _GEN_24 = io_req_bits_cmd_0 == 5'hF; // @[package.scala:16:47] wire _r2_c_cat_T_16; // @[package.scala:16:47] assign _r2_c_cat_T_16 = _GEN_24; // @[package.scala:16:47] wire _r2_c_cat_T_39; // @[package.scala:16:47] assign _r2_c_cat_T_39 = _GEN_24; // @[package.scala:16:47] wire _needs_second_acq_T_16; // @[package.scala:16:47] assign _needs_second_acq_T_16 = _GEN_24; // @[package.scala:16:47] wire _dirties_cat_T_16; // @[package.scala:16:47] assign _dirties_cat_T_16 = _GEN_24; // @[package.scala:16:47] wire _dirties_cat_T_39; // @[package.scala:16:47] assign _dirties_cat_T_39 = _GEN_24; // @[package.scala:16:47] wire _r_c_cat_T_16; // @[package.scala:16:47] assign _r_c_cat_T_16 = _GEN_24; // @[package.scala:16:47] wire _r_c_cat_T_39; // @[package.scala:16:47] assign _r_c_cat_T_39 = _GEN_24; // @[package.scala:16:47] wire _r2_c_cat_T_17 = _r2_c_cat_T_12 | _r2_c_cat_T_13; // @[package.scala:16:47, :81:59] wire _r2_c_cat_T_18 = _r2_c_cat_T_17 | _r2_c_cat_T_14; // @[package.scala:16:47, :81:59] wire _r2_c_cat_T_19 = _r2_c_cat_T_18 | _r2_c_cat_T_15; // @[package.scala:16:47, :81:59] wire _r2_c_cat_T_20 
= _r2_c_cat_T_19 | _r2_c_cat_T_16; // @[package.scala:16:47, :81:59] wire _r2_c_cat_T_21 = _r2_c_cat_T_11 | _r2_c_cat_T_20; // @[package.scala:81:59] wire _r2_c_cat_T_22 = _r2_c_cat_T_4 | _r2_c_cat_T_21; // @[Consts.scala:87:44, :90:{59,76}] wire _r2_c_cat_T_25 = _r2_c_cat_T_23 | _r2_c_cat_T_24; // @[Consts.scala:90:{32,42,49}] wire _r2_c_cat_T_27 = _r2_c_cat_T_25 | _r2_c_cat_T_26; // @[Consts.scala:90:{42,59,66}] wire _r2_c_cat_T_32 = _r2_c_cat_T_28 | _r2_c_cat_T_29; // @[package.scala:16:47, :81:59] wire _r2_c_cat_T_33 = _r2_c_cat_T_32 | _r2_c_cat_T_30; // @[package.scala:16:47, :81:59] wire _r2_c_cat_T_34 = _r2_c_cat_T_33 | _r2_c_cat_T_31; // @[package.scala:16:47, :81:59] wire _r2_c_cat_T_40 = _r2_c_cat_T_35 | _r2_c_cat_T_36; // @[package.scala:16:47, :81:59] wire _r2_c_cat_T_41 = _r2_c_cat_T_40 | _r2_c_cat_T_37; // @[package.scala:16:47, :81:59] wire _r2_c_cat_T_42 = _r2_c_cat_T_41 | _r2_c_cat_T_38; // @[package.scala:16:47, :81:59] wire _r2_c_cat_T_43 = _r2_c_cat_T_42 | _r2_c_cat_T_39; // @[package.scala:16:47, :81:59] wire _r2_c_cat_T_44 = _r2_c_cat_T_34 | _r2_c_cat_T_43; // @[package.scala:81:59] wire _r2_c_cat_T_45 = _r2_c_cat_T_27 | _r2_c_cat_T_44; // @[Consts.scala:87:44, :90:{59,76}] wire _GEN_25 = io_req_bits_cmd_0 == 5'h3; // @[NBDcache.scala:150:7] wire _r2_c_cat_T_46; // @[Consts.scala:91:54] assign _r2_c_cat_T_46 = _GEN_25; // @[Consts.scala:91:54] wire _needs_second_acq_T_23; // @[Consts.scala:91:54] assign _needs_second_acq_T_23 = _GEN_25; // @[Consts.scala:91:54] wire _dirties_cat_T_46; // @[Consts.scala:91:54] assign _dirties_cat_T_46 = _GEN_25; // @[Consts.scala:91:54] wire _rpq_io_enq_valid_T_4; // @[Consts.scala:88:52] assign _rpq_io_enq_valid_T_4 = _GEN_25; // @[Consts.scala:88:52, :91:54] wire _r_c_cat_T_46; // @[Consts.scala:91:54] assign _r_c_cat_T_46 = _GEN_25; // @[Consts.scala:91:54] wire _r2_c_cat_T_47 = _r2_c_cat_T_45 | _r2_c_cat_T_46; // @[Consts.scala:90:76, :91:{47,54}] wire _GEN_26 = io_req_bits_cmd_0 == 5'h6; // @[NBDcache.scala:150:7] wire _r2_c_cat_T_48; // @[Consts.scala:91:71] assign _r2_c_cat_T_48 = _GEN_26; // @[Consts.scala:91:71] wire _needs_second_acq_T_25; // @[Consts.scala:91:71] assign _needs_second_acq_T_25 = _GEN_26; // @[Consts.scala:91:71] wire _dirties_cat_T_48; // @[Consts.scala:91:71] assign _dirties_cat_T_48 = _GEN_26; // @[Consts.scala:91:71] wire _r_c_cat_T_48; // @[Consts.scala:91:71] assign _r_c_cat_T_48 = _GEN_26; // @[Consts.scala:91:71] wire _r2_c_cat_T_49 = _r2_c_cat_T_47 | _r2_c_cat_T_48; // @[Consts.scala:91:{47,64,71}] wire [1:0] r2_c = {_r2_c_cat_T_22, _r2_c_cat_T_49}; // @[Metadata.scala:29:18] wire [3:0] _r2_T = {r2_c, new_coh_state}; // @[Metadata.scala:29:18, :58:19] wire _r2_T_25 = _r2_T == 4'hC; // @[Misc.scala:49:20] wire [1:0] _r2_T_27 = {1'h0, _r2_T_25}; // @[Misc.scala:35:36, :49:20] wire _r2_T_28 = _r2_T == 4'hD; // @[Misc.scala:49:20] wire [1:0] _r2_T_30 = _r2_T_28 ? 2'h2 : _r2_T_27; // @[Misc.scala:35:36, :49:20] wire _r2_T_31 = _r2_T == 4'h4; // @[Misc.scala:49:20] wire [1:0] _r2_T_33 = _r2_T_31 ? 2'h1 : _r2_T_30; // @[Misc.scala:35:36, :49:20] wire _r2_T_34 = _r2_T == 4'h5; // @[Misc.scala:49:20] wire [1:0] _r2_T_36 = _r2_T_34 ? 2'h2 : _r2_T_33; // @[Misc.scala:35:36, :49:20] wire _r2_T_37 = _r2_T == 4'h0; // @[Misc.scala:49:20] wire [1:0] _r2_T_39 = _r2_T_37 ? 2'h0 : _r2_T_36; // @[Misc.scala:35:36, :49:20] wire _r2_T_40 = _r2_T == 4'hE; // @[Misc.scala:49:20] wire _r2_T_41 = _r2_T_40; // @[Misc.scala:35:9, :49:20] wire [1:0] _r2_T_42 = _r2_T_40 ? 
2'h3 : _r2_T_39; // @[Misc.scala:35:36, :49:20] wire _r2_T_43 = &_r2_T; // @[Misc.scala:49:20] wire _r2_T_44 = _r2_T_43 | _r2_T_41; // @[Misc.scala:35:9, :49:20] wire [1:0] _r2_T_45 = _r2_T_43 ? 2'h3 : _r2_T_42; // @[Misc.scala:35:36, :49:20] wire _r2_T_46 = _r2_T == 4'h6; // @[Misc.scala:49:20] wire _r2_T_47 = _r2_T_46 | _r2_T_44; // @[Misc.scala:35:9, :49:20] wire [1:0] _r2_T_48 = _r2_T_46 ? 2'h2 : _r2_T_45; // @[Misc.scala:35:36, :49:20] wire _r2_T_49 = _r2_T == 4'h7; // @[Misc.scala:49:20] wire _r2_T_50 = _r2_T_49 | _r2_T_47; // @[Misc.scala:35:9, :49:20] wire [1:0] _r2_T_51 = _r2_T_49 ? 2'h3 : _r2_T_48; // @[Misc.scala:35:36, :49:20] wire _r2_T_52 = _r2_T == 4'h1; // @[Misc.scala:49:20] wire _r2_T_53 = _r2_T_52 | _r2_T_50; // @[Misc.scala:35:9, :49:20] wire [1:0] _r2_T_54 = _r2_T_52 ? 2'h1 : _r2_T_51; // @[Misc.scala:35:36, :49:20] wire _r2_T_55 = _r2_T == 4'h2; // @[Misc.scala:49:20] wire _r2_T_56 = _r2_T_55 | _r2_T_53; // @[Misc.scala:35:9, :49:20] wire [1:0] _r2_T_57 = _r2_T_55 ? 2'h2 : _r2_T_54; // @[Misc.scala:35:36, :49:20] wire _r2_T_58 = _r2_T == 4'h3; // @[Misc.scala:49:20] wire r2_1 = _r2_T_58 | _r2_T_56; // @[Misc.scala:35:9, :49:20] wire [1:0] r2_2 = _r2_T_58 ? 2'h3 : _r2_T_57; // @[Misc.scala:35:36, :49:20] wire _needs_second_acq_T_2 = _needs_second_acq_T | _needs_second_acq_T_1; // @[Consts.scala:90:{32,42,49}] wire _needs_second_acq_T_4 = _needs_second_acq_T_2 | _needs_second_acq_T_3; // @[Consts.scala:90:{42,59,66}] wire _needs_second_acq_T_9 = _needs_second_acq_T_5 | _needs_second_acq_T_6; // @[package.scala:16:47, :81:59] wire _needs_second_acq_T_10 = _needs_second_acq_T_9 | _needs_second_acq_T_7; // @[package.scala:16:47, :81:59] wire _needs_second_acq_T_11 = _needs_second_acq_T_10 | _needs_second_acq_T_8; // @[package.scala:16:47, :81:59] wire _needs_second_acq_T_17 = _needs_second_acq_T_12 | _needs_second_acq_T_13; // @[package.scala:16:47, :81:59] wire _needs_second_acq_T_18 = _needs_second_acq_T_17 | _needs_second_acq_T_14; // @[package.scala:16:47, :81:59] wire _needs_second_acq_T_19 = _needs_second_acq_T_18 | _needs_second_acq_T_15; // @[package.scala:16:47, :81:59] wire _needs_second_acq_T_20 = _needs_second_acq_T_19 | _needs_second_acq_T_16; // @[package.scala:16:47, :81:59] wire _needs_second_acq_T_21 = _needs_second_acq_T_11 | _needs_second_acq_T_20; // @[package.scala:81:59] wire _needs_second_acq_T_22 = _needs_second_acq_T_4 | _needs_second_acq_T_21; // @[Consts.scala:87:44, :90:{59,76}] wire _needs_second_acq_T_24 = _needs_second_acq_T_22 | _needs_second_acq_T_23; // @[Consts.scala:90:76, :91:{47,54}] wire _needs_second_acq_T_26 = _needs_second_acq_T_24 | _needs_second_acq_T_25; // @[Consts.scala:91:{47,64,71}] wire _needs_second_acq_T_29 = _needs_second_acq_T_27 | _needs_second_acq_T_28; // @[Consts.scala:90:{32,42,49}] wire _needs_second_acq_T_31 = _needs_second_acq_T_29 | _needs_second_acq_T_30; // @[Consts.scala:90:{42,59,66}] wire _needs_second_acq_T_36 = _needs_second_acq_T_32 | _needs_second_acq_T_33; // @[package.scala:16:47, :81:59] wire _needs_second_acq_T_37 = _needs_second_acq_T_36 | _needs_second_acq_T_34; // @[package.scala:16:47, :81:59] wire _needs_second_acq_T_38 = _needs_second_acq_T_37 | _needs_second_acq_T_35; // @[package.scala:16:47, :81:59] wire _needs_second_acq_T_44 = _needs_second_acq_T_39 | _needs_second_acq_T_40; // @[package.scala:16:47, :81:59] wire _needs_second_acq_T_45 = _needs_second_acq_T_44 | _needs_second_acq_T_41; // @[package.scala:16:47, :81:59] wire _needs_second_acq_T_46 = _needs_second_acq_T_45 | 
_needs_second_acq_T_42; // @[package.scala:16:47, :81:59] wire _needs_second_acq_T_47 = _needs_second_acq_T_46 | _needs_second_acq_T_43; // @[package.scala:16:47, :81:59] wire _needs_second_acq_T_48 = _needs_second_acq_T_38 | _needs_second_acq_T_47; // @[package.scala:81:59] wire _needs_second_acq_T_49 = _needs_second_acq_T_31 | _needs_second_acq_T_48; // @[Consts.scala:87:44, :90:{59,76}] wire _needs_second_acq_T_51 = _needs_second_acq_T_49 | _needs_second_acq_T_50; // @[Consts.scala:90:76, :91:{47,54}] wire _needs_second_acq_T_53 = _needs_second_acq_T_51 | _needs_second_acq_T_52; // @[Consts.scala:91:{47,64,71}] wire _needs_second_acq_T_54 = ~_needs_second_acq_T_53; // @[Metadata.scala:104:57] wire cmd_requires_second_acquire = _needs_second_acq_T_26 & _needs_second_acq_T_54; // @[Metadata.scala:104:{54,57}] wire is_hit_again = r1_1 & r2_1; // @[Misc.scala:35:9] wire _dirties_cat_T_2 = _dirties_cat_T | _dirties_cat_T_1; // @[Consts.scala:90:{32,42,49}] wire _dirties_cat_T_4 = _dirties_cat_T_2 | _dirties_cat_T_3; // @[Consts.scala:90:{42,59,66}] wire _dirties_cat_T_9 = _dirties_cat_T_5 | _dirties_cat_T_6; // @[package.scala:16:47, :81:59] wire _dirties_cat_T_10 = _dirties_cat_T_9 | _dirties_cat_T_7; // @[package.scala:16:47, :81:59] wire _dirties_cat_T_11 = _dirties_cat_T_10 | _dirties_cat_T_8; // @[package.scala:16:47, :81:59] wire _dirties_cat_T_17 = _dirties_cat_T_12 | _dirties_cat_T_13; // @[package.scala:16:47, :81:59] wire _dirties_cat_T_18 = _dirties_cat_T_17 | _dirties_cat_T_14; // @[package.scala:16:47, :81:59] wire _dirties_cat_T_19 = _dirties_cat_T_18 | _dirties_cat_T_15; // @[package.scala:16:47, :81:59] wire _dirties_cat_T_20 = _dirties_cat_T_19 | _dirties_cat_T_16; // @[package.scala:16:47, :81:59] wire _dirties_cat_T_21 = _dirties_cat_T_11 | _dirties_cat_T_20; // @[package.scala:81:59] wire _dirties_cat_T_22 = _dirties_cat_T_4 | _dirties_cat_T_21; // @[Consts.scala:87:44, :90:{59,76}] wire _dirties_cat_T_25 = _dirties_cat_T_23 | _dirties_cat_T_24; // @[Consts.scala:90:{32,42,49}] wire _dirties_cat_T_27 = _dirties_cat_T_25 | _dirties_cat_T_26; // @[Consts.scala:90:{42,59,66}] wire _dirties_cat_T_32 = _dirties_cat_T_28 | _dirties_cat_T_29; // @[package.scala:16:47, :81:59] wire _dirties_cat_T_33 = _dirties_cat_T_32 | _dirties_cat_T_30; // @[package.scala:16:47, :81:59] wire _dirties_cat_T_34 = _dirties_cat_T_33 | _dirties_cat_T_31; // @[package.scala:16:47, :81:59] wire _dirties_cat_T_40 = _dirties_cat_T_35 | _dirties_cat_T_36; // @[package.scala:16:47, :81:59] wire _dirties_cat_T_41 = _dirties_cat_T_40 | _dirties_cat_T_37; // @[package.scala:16:47, :81:59] wire _dirties_cat_T_42 = _dirties_cat_T_41 | _dirties_cat_T_38; // @[package.scala:16:47, :81:59] wire _dirties_cat_T_43 = _dirties_cat_T_42 | _dirties_cat_T_39; // @[package.scala:16:47, :81:59] wire _dirties_cat_T_44 = _dirties_cat_T_34 | _dirties_cat_T_43; // @[package.scala:81:59] wire _dirties_cat_T_45 = _dirties_cat_T_27 | _dirties_cat_T_44; // @[Consts.scala:87:44, :90:{59,76}] wire _dirties_cat_T_47 = _dirties_cat_T_45 | _dirties_cat_T_46; // @[Consts.scala:90:76, :91:{47,54}] wire _dirties_cat_T_49 = _dirties_cat_T_47 | _dirties_cat_T_48; // @[Consts.scala:91:{47,64,71}] wire [1:0] dirties_cat = {_dirties_cat_T_22, _dirties_cat_T_49}; // @[Metadata.scala:29:18] wire dirties = &dirties_cat; // @[Metadata.scala:29:18, :106:42] wire [1:0] biggest_grow_param = dirties ? 
r2_2 : r1_2; // @[Misc.scala:35:36] wire [1:0] dirtier_coh_state = biggest_grow_param; // @[Metadata.scala:107:33, :160:20] wire [4:0] dirtier_cmd = dirties ? io_req_bits_cmd_0 : req_cmd; // @[Metadata.scala:106:42, :109:27] wire [26:0] _r_beats1_decode_T = 27'hFFF << io_mem_grant_bits_size_0; // @[package.scala:243:71] wire [11:0] _r_beats1_decode_T_1 = _r_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _r_beats1_decode_T_2 = ~_r_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [8:0] r_beats1_decode = _r_beats1_decode_T_2[11:3]; // @[package.scala:243:46] wire r_beats1_opdata = io_mem_grant_bits_opcode_0[0]; // @[Edges.scala:106:36] wire [8:0] r_beats1 = r_beats1_opdata ? r_beats1_decode : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [8:0] r_counter; // @[Edges.scala:229:27] wire [9:0] _r_counter1_T = {1'h0, r_counter} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] r_counter1 = _r_counter1_T[8:0]; // @[Edges.scala:230:28] wire r_1_1 = r_counter == 9'h0; // @[Edges.scala:229:27, :231:25] wire _r_last_T = r_counter == 9'h1; // @[Edges.scala:229:27, :232:25] wire _r_last_T_1 = r_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43] wire r_2 = _r_last_T | _r_last_T_1; // @[Edges.scala:232:{25,33,43}] wire refill_done = r_2 & io_mem_grant_valid_0; // @[Edges.scala:232:33, :233:22] wire [8:0] _r_count_T = ~r_counter1; // @[Edges.scala:230:28, :234:27] wire [8:0] r_4 = r_beats1 & _r_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _r_counter_T = r_1_1 ? r_beats1 : r_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [11:0] refill_address_inc = {r_4, 3'h0}; // @[Edges.scala:234:25, :269:29] wire _GEN_27 = state == 4'h1; // @[package.scala:16:47] wire _sec_rdy_T; // @[package.scala:16:47] assign _sec_rdy_T = _GEN_27; // @[package.scala:16:47] wire _io_probe_rdy_T_1; // @[package.scala:16:47] assign _io_probe_rdy_T_1 = _GEN_27; // @[package.scala:16:47] assign _io_wb_req_valid_T = _GEN_27; // @[package.scala:16:47] wire _T_11 = state == 4'h2; // @[package.scala:16:47] wire _sec_rdy_T_1; // @[package.scala:16:47] assign _sec_rdy_T_1 = _T_11; // @[package.scala:16:47] wire _io_probe_rdy_T_2; // @[package.scala:16:47] assign _io_probe_rdy_T_2 = _T_11; // @[package.scala:16:47] wire _T_9 = state == 4'h3; // @[package.scala:16:47] wire _sec_rdy_T_2; // @[package.scala:16:47] assign _sec_rdy_T_2 = _T_9; // @[package.scala:16:47] wire _io_probe_rdy_T_3; // @[package.scala:16:47] assign _io_probe_rdy_T_3 = _T_9; // @[package.scala:16:47] wire _io_meta_write_valid_T_1; // @[package.scala:16:47] assign _io_meta_write_valid_T_1 = _T_9; // @[package.scala:16:47] wire _io_meta_write_bits_data_coh_T; // @[NBDcache.scala:286:44] assign _io_meta_write_bits_data_coh_T = _T_9; // @[package.scala:16:47] wire _sec_rdy_T_3 = _sec_rdy_T | _sec_rdy_T_1; // @[package.scala:16:47, :81:59] wire _sec_rdy_T_4 = _sec_rdy_T_3 | _sec_rdy_T_2; // @[package.scala:16:47, :81:59] wire _GEN_28 = state == 4'h4; // @[package.scala:16:47] wire _sec_rdy_T_5; // @[package.scala:16:47] assign _sec_rdy_T_5 = _GEN_28; // @[package.scala:16:47] wire _can_finish_T_1; // @[package.scala:16:47] assign _can_finish_T_1 = _GEN_28; // @[package.scala:16:47] wire _io_mem_acquire_valid_T; // @[NBDcache.scala:298:33] assign _io_mem_acquire_valid_T = _GEN_28; // @[package.scala:16:47] wire _sec_rdy_T_6 = state == 4'h5; // @[package.scala:16:47] wire _sec_rdy_T_7 = _sec_rdy_T_5 | _sec_rdy_T_6; // @[package.scala:16:47, :81:59] wire _sec_rdy_T_8 = ~cmd_requires_second_acquire; // 
@[Metadata.scala:104:54] wire _sec_rdy_T_9 = _sec_rdy_T_7 & _sec_rdy_T_8; // @[package.scala:81:59] wire _sec_rdy_T_10 = ~refill_done; // @[Edges.scala:233:22] wire _sec_rdy_T_11 = _sec_rdy_T_9 & _sec_rdy_T_10; // @[NBDcache.scala:197:65, :198:{52,55}] wire _sec_rdy_T_12 = _sec_rdy_T_4 | _sec_rdy_T_11; // @[package.scala:81:59] wire sec_rdy = idx_match & _sec_rdy_T_12; // @[NBDcache.scala:180:27, :195:27, :196:56] wire _rpq_io_enq_valid_T = io_req_pri_val_0 & io_req_pri_rdy_0; // @[NBDcache.scala:150:7, :201:39] wire _rpq_io_enq_valid_T_1 = io_req_sec_val_0 & sec_rdy; // @[NBDcache.scala:150:7, :195:27, :201:75] wire _rpq_io_enq_valid_T_2 = _rpq_io_enq_valid_T | _rpq_io_enq_valid_T_1; // @[NBDcache.scala:201:{39,57,75}] wire _rpq_io_enq_valid_T_3 = io_req_bits_cmd_0 == 5'h2; // @[NBDcache.scala:150:7] wire _rpq_io_enq_valid_T_5 = _rpq_io_enq_valid_T_3 | _rpq_io_enq_valid_T_4; // @[Consts.scala:88:{35,45,52}] wire _rpq_io_enq_valid_T_6 = ~_rpq_io_enq_valid_T_5; // @[NBDcache.scala:201:90] wire _rpq_io_enq_valid_T_7 = _rpq_io_enq_valid_T_2 & _rpq_io_enq_valid_T_6; // @[NBDcache.scala:201:{57,87,90}] wire _T = state == 4'h8; // @[NBDcache.scala:174:22, :203:49] wire _rpq_io_deq_ready_T; // @[NBDcache.scala:203:49] assign _rpq_io_deq_ready_T = _T; // @[NBDcache.scala:203:49] assign _io_meta_read_valid_T = _T; // @[NBDcache.scala:203:49, :305:31] wire _io_replay_valid_T; // @[NBDcache.scala:310:28] assign _io_replay_valid_T = _T; // @[NBDcache.scala:203:49, :310:28] wire _rpq_io_deq_ready_T_1 = io_replay_ready_0 & _rpq_io_deq_ready_T; // @[NBDcache.scala:150:7, :203:{40,49}] wire _rpq_io_deq_ready_T_2 = ~(|state); // @[NBDcache.scala:174:22, :203:75] wire _rpq_io_deq_ready_T_3 = _rpq_io_deq_ready_T_1 | _rpq_io_deq_ready_T_2; // @[NBDcache.scala:203:{40,66,75}] reg acked; // @[NBDcache.scala:205:18] wire _io_meta_write_valid_T = state == 4'h6; // @[package.scala:16:47] wire [3:0] _needs_wb_r_T_6 = {2'h2, io_req_bits_old_meta_coh_state_0}; // @[Metadata.scala:120:19] wire _needs_wb_r_T_19 = _needs_wb_r_T_6 == 4'h8; // @[Misc.scala:56:20] wire [2:0] _needs_wb_r_T_21 = _needs_wb_r_T_19 ? 3'h5 : 3'h0; // @[Misc.scala:38:36, :56:20] wire _needs_wb_r_T_23 = _needs_wb_r_T_6 == 4'h9; // @[Misc.scala:56:20] wire [2:0] _needs_wb_r_T_25 = _needs_wb_r_T_23 ? 3'h2 : _needs_wb_r_T_21; // @[Misc.scala:38:36, :56:20] wire _needs_wb_r_T_27 = _needs_wb_r_T_6 == 4'hA; // @[Misc.scala:56:20] wire [2:0] _needs_wb_r_T_29 = _needs_wb_r_T_27 ? 3'h1 : _needs_wb_r_T_25; // @[Misc.scala:38:36, :56:20] wire _needs_wb_r_T_31 = _needs_wb_r_T_6 == 4'hB; // @[Misc.scala:56:20] wire _needs_wb_r_T_32 = _needs_wb_r_T_31; // @[Misc.scala:38:9, :56:20] wire [2:0] _needs_wb_r_T_33 = _needs_wb_r_T_31 ? 3'h1 : _needs_wb_r_T_29; // @[Misc.scala:38:36, :56:20] wire _needs_wb_r_T_35 = _needs_wb_r_T_6 == 4'h4; // @[Misc.scala:56:20] wire _needs_wb_r_T_36 = ~_needs_wb_r_T_35 & _needs_wb_r_T_32; // @[Misc.scala:38:9, :56:20] wire [2:0] _needs_wb_r_T_37 = _needs_wb_r_T_35 ? 3'h5 : _needs_wb_r_T_33; // @[Misc.scala:38:36, :56:20] wire _needs_wb_r_T_39 = _needs_wb_r_T_6 == 4'h5; // @[Misc.scala:56:20] wire _needs_wb_r_T_40 = ~_needs_wb_r_T_39 & _needs_wb_r_T_36; // @[Misc.scala:38:9, :56:20] wire [2:0] _needs_wb_r_T_41 = _needs_wb_r_T_39 ? 
3'h4 : _needs_wb_r_T_37; // @[Misc.scala:38:36, :56:20] wire [1:0] _needs_wb_r_T_42 = {1'h0, _needs_wb_r_T_39}; // @[Misc.scala:38:63, :56:20] wire _needs_wb_r_T_43 = _needs_wb_r_T_6 == 4'h6; // @[Misc.scala:56:20] wire _needs_wb_r_T_44 = ~_needs_wb_r_T_43 & _needs_wb_r_T_40; // @[Misc.scala:38:9, :56:20] wire [2:0] _needs_wb_r_T_45 = _needs_wb_r_T_43 ? 3'h0 : _needs_wb_r_T_41; // @[Misc.scala:38:36, :56:20] wire [1:0] _needs_wb_r_T_46 = _needs_wb_r_T_43 ? 2'h1 : _needs_wb_r_T_42; // @[Misc.scala:38:63, :56:20] wire _needs_wb_r_T_47 = _needs_wb_r_T_6 == 4'h7; // @[Misc.scala:56:20] wire _needs_wb_r_T_48 = _needs_wb_r_T_47 | _needs_wb_r_T_44; // @[Misc.scala:38:9, :56:20] wire [2:0] _needs_wb_r_T_49 = _needs_wb_r_T_47 ? 3'h0 : _needs_wb_r_T_45; // @[Misc.scala:38:36, :56:20] wire [1:0] _needs_wb_r_T_50 = _needs_wb_r_T_47 ? 2'h1 : _needs_wb_r_T_46; // @[Misc.scala:38:63, :56:20] wire _needs_wb_r_T_51 = _needs_wb_r_T_6 == 4'h0; // @[Misc.scala:56:20] wire _needs_wb_r_T_52 = ~_needs_wb_r_T_51 & _needs_wb_r_T_48; // @[Misc.scala:38:9, :56:20] wire [2:0] _needs_wb_r_T_53 = _needs_wb_r_T_51 ? 3'h5 : _needs_wb_r_T_49; // @[Misc.scala:38:36, :56:20] wire [1:0] _needs_wb_r_T_54 = _needs_wb_r_T_51 ? 2'h0 : _needs_wb_r_T_50; // @[Misc.scala:38:63, :56:20] wire _needs_wb_r_T_55 = _needs_wb_r_T_6 == 4'h1; // @[Misc.scala:56:20] wire _needs_wb_r_T_56 = ~_needs_wb_r_T_55 & _needs_wb_r_T_52; // @[Misc.scala:38:9, :56:20] wire [2:0] _needs_wb_r_T_57 = _needs_wb_r_T_55 ? 3'h4 : _needs_wb_r_T_53; // @[Misc.scala:38:36, :56:20] wire [1:0] _needs_wb_r_T_58 = _needs_wb_r_T_55 ? 2'h1 : _needs_wb_r_T_54; // @[Misc.scala:38:63, :56:20] wire _needs_wb_r_T_59 = _needs_wb_r_T_6 == 4'h2; // @[Misc.scala:56:20] wire _needs_wb_r_T_60 = ~_needs_wb_r_T_59 & _needs_wb_r_T_56; // @[Misc.scala:38:9, :56:20] wire [2:0] _needs_wb_r_T_61 = _needs_wb_r_T_59 ? 3'h3 : _needs_wb_r_T_57; // @[Misc.scala:38:36, :56:20] wire [1:0] _needs_wb_r_T_62 = _needs_wb_r_T_59 ? 2'h2 : _needs_wb_r_T_58; // @[Misc.scala:38:63, :56:20] wire _needs_wb_r_T_63 = _needs_wb_r_T_6 == 4'h3; // @[Misc.scala:56:20] wire needs_wb = _needs_wb_r_T_63 | _needs_wb_r_T_60; // @[Misc.scala:38:9, :56:20] wire [2:0] needs_wb_r_2 = _needs_wb_r_T_63 ? 3'h3 : _needs_wb_r_T_61; // @[Misc.scala:38:36, :56:20] wire [1:0] needs_wb_r_3 = _needs_wb_r_T_63 ? 
2'h2 : _needs_wb_r_T_62; // @[Misc.scala:38:63, :56:20] wire [1:0] needs_wb_meta_state = needs_wb_r_3; // @[Misc.scala:38:63] wire _r_c_cat_T_2 = _r_c_cat_T | _r_c_cat_T_1; // @[Consts.scala:90:{32,42,49}] wire _r_c_cat_T_4 = _r_c_cat_T_2 | _r_c_cat_T_3; // @[Consts.scala:90:{42,59,66}] wire _r_c_cat_T_9 = _r_c_cat_T_5 | _r_c_cat_T_6; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_10 = _r_c_cat_T_9 | _r_c_cat_T_7; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_11 = _r_c_cat_T_10 | _r_c_cat_T_8; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_17 = _r_c_cat_T_12 | _r_c_cat_T_13; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_18 = _r_c_cat_T_17 | _r_c_cat_T_14; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_19 = _r_c_cat_T_18 | _r_c_cat_T_15; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_20 = _r_c_cat_T_19 | _r_c_cat_T_16; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_21 = _r_c_cat_T_11 | _r_c_cat_T_20; // @[package.scala:81:59] wire _r_c_cat_T_22 = _r_c_cat_T_4 | _r_c_cat_T_21; // @[Consts.scala:87:44, :90:{59,76}] wire _r_c_cat_T_25 = _r_c_cat_T_23 | _r_c_cat_T_24; // @[Consts.scala:90:{32,42,49}] wire _r_c_cat_T_27 = _r_c_cat_T_25 | _r_c_cat_T_26; // @[Consts.scala:90:{42,59,66}] wire _r_c_cat_T_32 = _r_c_cat_T_28 | _r_c_cat_T_29; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_33 = _r_c_cat_T_32 | _r_c_cat_T_30; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_34 = _r_c_cat_T_33 | _r_c_cat_T_31; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_40 = _r_c_cat_T_35 | _r_c_cat_T_36; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_41 = _r_c_cat_T_40 | _r_c_cat_T_37; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_42 = _r_c_cat_T_41 | _r_c_cat_T_38; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_43 = _r_c_cat_T_42 | _r_c_cat_T_39; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_44 = _r_c_cat_T_34 | _r_c_cat_T_43; // @[package.scala:81:59] wire _r_c_cat_T_45 = _r_c_cat_T_27 | _r_c_cat_T_44; // @[Consts.scala:87:44, :90:{59,76}] wire _r_c_cat_T_47 = _r_c_cat_T_45 | _r_c_cat_T_46; // @[Consts.scala:90:76, :91:{47,54}] wire _r_c_cat_T_49 = _r_c_cat_T_47 | _r_c_cat_T_48; // @[Consts.scala:91:{47,64,71}] wire [1:0] r_c = {_r_c_cat_T_22, _r_c_cat_T_49}; // @[Metadata.scala:29:18] wire [3:0] _r_T_64 = {r_c, io_req_bits_old_meta_coh_state_0}; // @[Metadata.scala:29:18, :58:19] wire _r_T_89 = _r_T_64 == 4'hC; // @[Misc.scala:49:20] wire [1:0] _r_T_91 = {1'h0, _r_T_89}; // @[Misc.scala:35:36, :49:20] wire _r_T_92 = _r_T_64 == 4'hD; // @[Misc.scala:49:20] wire [1:0] _r_T_94 = _r_T_92 ? 2'h2 : _r_T_91; // @[Misc.scala:35:36, :49:20] wire _r_T_95 = _r_T_64 == 4'h4; // @[Misc.scala:49:20] wire [1:0] _r_T_97 = _r_T_95 ? 2'h1 : _r_T_94; // @[Misc.scala:35:36, :49:20] wire _r_T_98 = _r_T_64 == 4'h5; // @[Misc.scala:49:20] wire [1:0] _r_T_100 = _r_T_98 ? 2'h2 : _r_T_97; // @[Misc.scala:35:36, :49:20] wire _r_T_101 = _r_T_64 == 4'h0; // @[Misc.scala:49:20] wire [1:0] _r_T_103 = _r_T_101 ? 2'h0 : _r_T_100; // @[Misc.scala:35:36, :49:20] wire _r_T_104 = _r_T_64 == 4'hE; // @[Misc.scala:49:20] wire _r_T_105 = _r_T_104; // @[Misc.scala:35:9, :49:20] wire [1:0] _r_T_106 = _r_T_104 ? 2'h3 : _r_T_103; // @[Misc.scala:35:36, :49:20] wire _r_T_107 = &_r_T_64; // @[Misc.scala:49:20] wire _r_T_108 = _r_T_107 | _r_T_105; // @[Misc.scala:35:9, :49:20] wire [1:0] _r_T_109 = _r_T_107 ? 2'h3 : _r_T_106; // @[Misc.scala:35:36, :49:20] wire _r_T_110 = _r_T_64 == 4'h6; // @[Misc.scala:49:20] wire _r_T_111 = _r_T_110 | _r_T_108; // @[Misc.scala:35:9, :49:20] wire [1:0] _r_T_112 = _r_T_110 ? 
2'h2 : _r_T_109; // @[Misc.scala:35:36, :49:20] wire _r_T_113 = _r_T_64 == 4'h7; // @[Misc.scala:49:20] wire _r_T_114 = _r_T_113 | _r_T_111; // @[Misc.scala:35:9, :49:20] wire [1:0] _r_T_115 = _r_T_113 ? 2'h3 : _r_T_112; // @[Misc.scala:35:36, :49:20] wire _r_T_116 = _r_T_64 == 4'h1; // @[Misc.scala:49:20] wire _r_T_117 = _r_T_116 | _r_T_114; // @[Misc.scala:35:9, :49:20] wire [1:0] _r_T_118 = _r_T_116 ? 2'h1 : _r_T_115; // @[Misc.scala:35:36, :49:20] wire _r_T_119 = _r_T_64 == 4'h2; // @[Misc.scala:49:20] wire _r_T_120 = _r_T_119 | _r_T_117; // @[Misc.scala:35:9, :49:20] wire [1:0] _r_T_121 = _r_T_119 ? 2'h2 : _r_T_118; // @[Misc.scala:35:36, :49:20] wire _r_T_122 = _r_T_64 == 4'h3; // @[Misc.scala:49:20] wire is_hit = _r_T_122 | _r_T_120; // @[Misc.scala:35:9, :49:20] wire [1:0] r_2_1 = _r_T_122 ? 2'h3 : _r_T_121; // @[Misc.scala:35:36, :49:20] wire [1:0] coh_on_hit_state = r_2_1; // @[Misc.scala:35:36] wire [3:0] _state_T = {2'h0, ~needs_wb, 1'h1}; // @[Misc.scala:38:9] wire _can_finish_T = ~(|state); // @[package.scala:16:47] wire can_finish = _can_finish_T | _can_finish_T_1; // @[package.scala:16:47, :81:59] wire _grantackq_io_enq_valid_T = io_mem_grant_bits_opcode_0[2]; // @[Edges.scala:71:36] wire _grantackq_io_enq_valid_T_1 = io_mem_grant_bits_opcode_0[1]; // @[Edges.scala:71:52] wire _grantackq_io_enq_valid_T_2 = ~_grantackq_io_enq_valid_T_1; // @[Edges.scala:71:{43,52}] wire _grantackq_io_enq_valid_T_3 = _grantackq_io_enq_valid_T & _grantackq_io_enq_valid_T_2; // @[Edges.scala:71:{36,40,43}] wire _grantackq_io_enq_valid_T_4 = refill_done & _grantackq_io_enq_valid_T_3; // @[Edges.scala:71:40, :233:22] assign _io_mem_finish_valid_T = _grantackq_io_deq_valid & can_finish; // @[package.scala:81:59] assign io_mem_finish_valid_0 = _io_mem_finish_valid_T; // @[NBDcache.scala:150:7, :267:49] wire _grantackq_io_deq_ready_T = io_mem_finish_ready_0 & can_finish; // @[package.scala:81:59] wire _io_idx_match_T = |state; // @[NBDcache.scala:174:22, :203:75, :271:26] assign _io_idx_match_T_1 = _io_idx_match_T & idx_match; // @[NBDcache.scala:180:27, :271:{26,41}] assign io_idx_match_0 = _io_idx_match_T_1; // @[NBDcache.scala:150:7, :271:41] wire [39:0] _io_refill_addr_T = {req_block_addr[39:12], req_block_addr[11:0] | refill_address_inc}; // @[Edges.scala:269:29] assign io_refill_addr_0 = _io_refill_addr_T[11:0]; // @[NBDcache.scala:150:7, :273:{18,36}] assign io_tag_0 = req_tag_0[19:0]; // @[NBDcache.scala:150:7, :178:26, :274:10] assign _io_req_pri_rdy_T = ~(|state); // @[NBDcache.scala:174:22, :203:75, :275:27] assign io_req_pri_rdy_0 = _io_req_pri_rdy_T; // @[NBDcache.scala:150:7, :275:27] assign _io_req_sec_rdy_T = sec_rdy & _rpq_io_enq_ready; // @[NBDcache.scala:195:27, :200:19, :276:29] assign io_req_sec_rdy_0 = _io_req_sec_rdy_T; // @[NBDcache.scala:150:7, :276:29] reg [1:0] meta_hazard; // @[NBDcache.scala:278:28] wire [2:0] _meta_hazard_T = {1'h0, meta_hazard} + 3'h1; // @[NBDcache.scala:278:28, :279:59] wire [1:0] _meta_hazard_T_1 = _meta_hazard_T[1:0]; // @[NBDcache.scala:279:59] wire _io_probe_rdy_T = ~idx_match; // @[NBDcache.scala:180:27, :281:19] wire _io_probe_rdy_T_4 = _io_probe_rdy_T_1 | _io_probe_rdy_T_2; // @[package.scala:16:47, :81:59] wire _io_probe_rdy_T_5 = _io_probe_rdy_T_4 | _io_probe_rdy_T_3; // @[package.scala:16:47, :81:59] wire _io_probe_rdy_T_6 = ~_io_probe_rdy_T_5; // @[package.scala:81:59] wire _io_probe_rdy_T_7 = meta_hazard == 2'h0; // @[NBDcache.scala:278:28, :281:86] wire _io_probe_rdy_T_8 = _io_probe_rdy_T_6 & _io_probe_rdy_T_7; // 
@[NBDcache.scala:281:{34,71,86}] assign _io_probe_rdy_T_9 = _io_probe_rdy_T | _io_probe_rdy_T_8; // @[NBDcache.scala:281:{19,30,71}] assign io_probe_rdy_0 = _io_probe_rdy_T_9; // @[NBDcache.scala:150:7, :281:30] assign _io_meta_write_valid_T_2 = _io_meta_write_valid_T | _io_meta_write_valid_T_1; // @[package.scala:16:47, :81:59] assign io_meta_write_valid_0 = _io_meta_write_valid_T_2; // @[package.scala:81:59] assign _io_meta_write_bits_data_coh_T_1_state = _io_meta_write_bits_data_coh_T ? coh_on_clear_state : new_coh_state; // @[Metadata.scala:160:20] assign io_meta_write_bits_data_coh_state_0 = _io_meta_write_bits_data_coh_T_1_state; // @[NBDcache.scala:150:7, :286:37] assign io_wb_req_valid_0 = _io_wb_req_valid_T; // @[NBDcache.scala:150:7, :290:28] assign _io_mem_acquire_valid_T_1 = _io_mem_acquire_valid_T & _grantackq_io_enq_ready; // @[NBDcache.scala:263:25, :298:{33,50}] assign io_mem_acquire_valid_0 = _io_mem_acquire_valid_T_1; // @[NBDcache.scala:150:7, :298:50] wire [25:0] _GEN_29 = {io_tag_0, req_idx}; // @[NBDcache.scala:150:7, :177:25, :301:48] wire [25:0] _io_mem_acquire_bits_T; // @[NBDcache.scala:301:48] assign _io_mem_acquire_bits_T = _GEN_29; // @[NBDcache.scala:301:48] wire [25:0] io_replay_bits_addr_hi; // @[NBDcache.scala:313:29] assign io_replay_bits_addr_hi = _GEN_29; // @[NBDcache.scala:301:48, :313:29] wire [31:0] _io_mem_acquire_bits_T_1 = {_io_mem_acquire_bits_T, 6'h0}; // @[NBDcache.scala:179:51, :301:{48,66}] wire [31:0] _io_mem_acquire_bits_legal_T_1 = _io_mem_acquire_bits_T_1; // @[NBDcache.scala:301:66] assign io_mem_acquire_bits_a_address = _io_mem_acquire_bits_T_1; // @[Edges.scala:346:17] wire [32:0] _io_mem_acquire_bits_legal_T_2 = {1'h0, _io_mem_acquire_bits_legal_T_1}; // @[Parameters.scala:137:{31,41}] wire [32:0] _io_mem_acquire_bits_legal_T_3 = _io_mem_acquire_bits_legal_T_2 & 33'h8C000000; // @[Parameters.scala:137:{41,46}] wire [32:0] _io_mem_acquire_bits_legal_T_4 = _io_mem_acquire_bits_legal_T_3; // @[Parameters.scala:137:46] wire _io_mem_acquire_bits_legal_T_5 = _io_mem_acquire_bits_legal_T_4 == 33'h0; // @[Parameters.scala:137:{46,59}] wire [31:0] _io_mem_acquire_bits_legal_T_6 = {_io_mem_acquire_bits_T_1[31:17], _io_mem_acquire_bits_T_1[16:0] ^ 17'h10000}; // @[NBDcache.scala:301:66] wire [32:0] _io_mem_acquire_bits_legal_T_7 = {1'h0, _io_mem_acquire_bits_legal_T_6}; // @[Parameters.scala:137:{31,41}] wire [32:0] _io_mem_acquire_bits_legal_T_8 = _io_mem_acquire_bits_legal_T_7 & 33'h8C011000; // @[Parameters.scala:137:{41,46}] wire [32:0] _io_mem_acquire_bits_legal_T_9 = _io_mem_acquire_bits_legal_T_8; // @[Parameters.scala:137:46] wire _io_mem_acquire_bits_legal_T_10 = _io_mem_acquire_bits_legal_T_9 == 33'h0; // @[Parameters.scala:137:{46,59}] wire [31:0] _io_mem_acquire_bits_legal_T_11 = {_io_mem_acquire_bits_T_1[31:28], _io_mem_acquire_bits_T_1[27:0] ^ 28'hC000000}; // @[NBDcache.scala:301:66] wire [32:0] _io_mem_acquire_bits_legal_T_12 = {1'h0, _io_mem_acquire_bits_legal_T_11}; // @[Parameters.scala:137:{31,41}] wire [32:0] _io_mem_acquire_bits_legal_T_13 = _io_mem_acquire_bits_legal_T_12 & 33'h8C000000; // @[Parameters.scala:137:{41,46}] wire [32:0] _io_mem_acquire_bits_legal_T_14 = _io_mem_acquire_bits_legal_T_13; // @[Parameters.scala:137:46] wire _io_mem_acquire_bits_legal_T_15 = _io_mem_acquire_bits_legal_T_14 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _io_mem_acquire_bits_legal_T_16 = _io_mem_acquire_bits_legal_T_5 | _io_mem_acquire_bits_legal_T_10; // @[Parameters.scala:685:42] wire _io_mem_acquire_bits_legal_T_17 = 
_io_mem_acquire_bits_legal_T_16 | _io_mem_acquire_bits_legal_T_15; // @[Parameters.scala:685:42] wire [31:0] _io_mem_acquire_bits_legal_T_21 = {_io_mem_acquire_bits_T_1[31:28], _io_mem_acquire_bits_T_1[27:0] ^ 28'h8000000}; // @[NBDcache.scala:301:66] wire [32:0] _io_mem_acquire_bits_legal_T_22 = {1'h0, _io_mem_acquire_bits_legal_T_21}; // @[Parameters.scala:137:{31,41}] wire [32:0] _io_mem_acquire_bits_legal_T_23 = _io_mem_acquire_bits_legal_T_22 & 33'h8C010000; // @[Parameters.scala:137:{41,46}] wire [32:0] _io_mem_acquire_bits_legal_T_24 = _io_mem_acquire_bits_legal_T_23; // @[Parameters.scala:137:46] wire _io_mem_acquire_bits_legal_T_25 = _io_mem_acquire_bits_legal_T_24 == 33'h0; // @[Parameters.scala:137:{46,59}] wire [31:0] _io_mem_acquire_bits_legal_T_26 = _io_mem_acquire_bits_T_1 ^ 32'h80000000; // @[NBDcache.scala:301:66] wire [32:0] _io_mem_acquire_bits_legal_T_27 = {1'h0, _io_mem_acquire_bits_legal_T_26}; // @[Parameters.scala:137:{31,41}] wire [32:0] _io_mem_acquire_bits_legal_T_28 = _io_mem_acquire_bits_legal_T_27 & 33'h80000000; // @[Parameters.scala:137:{41,46}] wire [32:0] _io_mem_acquire_bits_legal_T_29 = _io_mem_acquire_bits_legal_T_28; // @[Parameters.scala:137:46] wire _io_mem_acquire_bits_legal_T_30 = _io_mem_acquire_bits_legal_T_29 == 33'h0; // @[Parameters.scala:137:{46,59}] wire _io_mem_acquire_bits_legal_T_31 = _io_mem_acquire_bits_legal_T_25 | _io_mem_acquire_bits_legal_T_30; // @[Parameters.scala:685:42] wire _io_mem_acquire_bits_legal_T_32 = _io_mem_acquire_bits_legal_T_31; // @[Parameters.scala:684:54, :685:42] wire io_mem_acquire_bits_legal = _io_mem_acquire_bits_legal_T_32; // @[Parameters.scala:684:54, :686:26] assign io_mem_acquire_bits_param_0 = io_mem_acquire_bits_a_param; // @[Edges.scala:346:17] assign io_mem_acquire_bits_address_0 = io_mem_acquire_bits_a_address; // @[Edges.scala:346:17] assign io_mem_acquire_bits_a_param = {1'h0, grow_param}; // @[Misc.scala:35:36] wire io_mem_acquire_bits_a_mask_sub_sub_bit = _io_mem_acquire_bits_T_1[2]; // @[Misc.scala:210:26] wire io_mem_acquire_bits_a_mask_sub_sub_1_2 = io_mem_acquire_bits_a_mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire io_mem_acquire_bits_a_mask_sub_sub_nbit = ~io_mem_acquire_bits_a_mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire io_mem_acquire_bits_a_mask_sub_sub_0_2 = io_mem_acquire_bits_a_mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _io_mem_acquire_bits_a_mask_sub_sub_acc_T = io_mem_acquire_bits_a_mask_sub_sub_0_2; // @[Misc.scala:214:27, :215:38] wire _io_mem_acquire_bits_a_mask_sub_sub_acc_T_1 = io_mem_acquire_bits_a_mask_sub_sub_1_2; // @[Misc.scala:214:27, :215:38] wire io_mem_acquire_bits_a_mask_sub_bit = _io_mem_acquire_bits_T_1[1]; // @[Misc.scala:210:26] wire io_mem_acquire_bits_a_mask_sub_nbit = ~io_mem_acquire_bits_a_mask_sub_bit; // @[Misc.scala:210:26, :211:20] wire io_mem_acquire_bits_a_mask_sub_0_2 = io_mem_acquire_bits_a_mask_sub_sub_0_2 & io_mem_acquire_bits_a_mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire io_mem_acquire_bits_a_mask_sub_1_2 = io_mem_acquire_bits_a_mask_sub_sub_0_2 & io_mem_acquire_bits_a_mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire io_mem_acquire_bits_a_mask_sub_2_2 = io_mem_acquire_bits_a_mask_sub_sub_1_2 & io_mem_acquire_bits_a_mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire io_mem_acquire_bits_a_mask_sub_3_2 = io_mem_acquire_bits_a_mask_sub_sub_1_2 & io_mem_acquire_bits_a_mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire io_mem_acquire_bits_a_mask_bit = _io_mem_acquire_bits_T_1[0]; // @[Misc.scala:210:26] 
wire io_mem_acquire_bits_a_mask_nbit = ~io_mem_acquire_bits_a_mask_bit; // @[Misc.scala:210:26, :211:20] wire io_mem_acquire_bits_a_mask_eq = io_mem_acquire_bits_a_mask_sub_0_2 & io_mem_acquire_bits_a_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _io_mem_acquire_bits_a_mask_acc_T = io_mem_acquire_bits_a_mask_eq; // @[Misc.scala:214:27, :215:38] wire io_mem_acquire_bits_a_mask_eq_1 = io_mem_acquire_bits_a_mask_sub_0_2 & io_mem_acquire_bits_a_mask_bit; // @[Misc.scala:210:26, :214:27] wire _io_mem_acquire_bits_a_mask_acc_T_1 = io_mem_acquire_bits_a_mask_eq_1; // @[Misc.scala:214:27, :215:38] wire io_mem_acquire_bits_a_mask_eq_2 = io_mem_acquire_bits_a_mask_sub_1_2 & io_mem_acquire_bits_a_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _io_mem_acquire_bits_a_mask_acc_T_2 = io_mem_acquire_bits_a_mask_eq_2; // @[Misc.scala:214:27, :215:38] wire io_mem_acquire_bits_a_mask_eq_3 = io_mem_acquire_bits_a_mask_sub_1_2 & io_mem_acquire_bits_a_mask_bit; // @[Misc.scala:210:26, :214:27] wire _io_mem_acquire_bits_a_mask_acc_T_3 = io_mem_acquire_bits_a_mask_eq_3; // @[Misc.scala:214:27, :215:38] wire io_mem_acquire_bits_a_mask_eq_4 = io_mem_acquire_bits_a_mask_sub_2_2 & io_mem_acquire_bits_a_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _io_mem_acquire_bits_a_mask_acc_T_4 = io_mem_acquire_bits_a_mask_eq_4; // @[Misc.scala:214:27, :215:38] wire io_mem_acquire_bits_a_mask_eq_5 = io_mem_acquire_bits_a_mask_sub_2_2 & io_mem_acquire_bits_a_mask_bit; // @[Misc.scala:210:26, :214:27] wire _io_mem_acquire_bits_a_mask_acc_T_5 = io_mem_acquire_bits_a_mask_eq_5; // @[Misc.scala:214:27, :215:38] wire io_mem_acquire_bits_a_mask_eq_6 = io_mem_acquire_bits_a_mask_sub_3_2 & io_mem_acquire_bits_a_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _io_mem_acquire_bits_a_mask_acc_T_6 = io_mem_acquire_bits_a_mask_eq_6; // @[Misc.scala:214:27, :215:38] wire io_mem_acquire_bits_a_mask_eq_7 = io_mem_acquire_bits_a_mask_sub_3_2 & io_mem_acquire_bits_a_mask_bit; // @[Misc.scala:210:26, :214:27] wire _io_mem_acquire_bits_a_mask_acc_T_7 = io_mem_acquire_bits_a_mask_eq_7; // @[Misc.scala:214:27, :215:38] assign io_meta_read_valid_0 = _io_meta_read_valid_T; // @[NBDcache.scala:150:7, :305:31] assign _io_replay_valid_T_1 = _io_replay_valid_T & _rpq_io_deq_valid; // @[NBDcache.scala:200:19, :310:{28,44}] assign io_replay_valid_0 = _io_replay_valid_T_1; // @[NBDcache.scala:150:7, :310:44] wire [5:0] _io_replay_bits_addr_T = _rpq_io_deq_bits_addr[5:0]; // @[NBDcache.scala:200:19, :313:67] wire [31:0] _io_replay_bits_addr_T_1 = {io_replay_bits_addr_hi, _io_replay_bits_addr_T}; // @[NBDcache.scala:313:{29,67}] assign io_replay_bits_addr_0 = {8'h0, _io_replay_bits_addr_T_1}; // @[Edges.scala:232:25] assign io_replay_bits_cmd_0 = io_meta_read_ready_0 ? _rpq_io_deq_bits_cmd : 5'h5; // @[NBDcache.scala:150:7, :200:19, :311:18, :315:30, :317:24] wire _T_7 = _sec_rdy_T_6 & refill_done; // @[package.scala:16:47] wire _T_15 = io_req_sec_val_0 & io_req_sec_rdy_0; // @[NBDcache.scala:150:7, :234:24] always @(posedge clock) begin // @[NBDcache.scala:150:7] if (reset) begin // @[NBDcache.scala:150:7] state <= 4'h0; // @[NBDcache.scala:174:22] new_coh_state <= 2'h0; // @[NBDcache.scala:182:24] r_counter <= 9'h0; // @[Edges.scala:229:27] meta_hazard <= 2'h0; // @[NBDcache.scala:278:28] end else begin // @[NBDcache.scala:150:7] if (_rpq_io_enq_valid_T) begin // @[NBDcache.scala:201:39] state <= io_req_bits_tag_match_0 ? {2'h1, is_hit, 1'h0} : _state_T; // @[Misc.scala:35:9] new_coh_state <= io_req_bits_tag_match_0 ? (is_hit ? 
coh_on_hit_state : io_req_bits_old_meta_coh_state_0) : 2'h0; // @[Misc.scala:35:9] end else begin // @[NBDcache.scala:201:39] if (io_wb_req_ready_0 & io_wb_req_valid_0) // @[Decoupled.scala:51:35] state <= 4'h2; // @[NBDcache.scala:174:22] else if (_T_11 & io_wb_req_ready_0 & acked) // @[package.scala:16:47] state <= 4'h3; // @[NBDcache.scala:174:22] else if (_T_9 & io_meta_write_ready_0) // @[package.scala:16:47] state <= 4'h4; // @[NBDcache.scala:174:22] else if (io_mem_acquire_ready_0 & io_mem_acquire_valid_0) // @[Decoupled.scala:51:35] state <= 4'h5; // @[NBDcache.scala:174:22] else if (_T_7) // @[NBDcache.scala:218:33] state <= 4'h6; // @[NBDcache.scala:174:22] else if (_io_meta_write_valid_T & io_meta_write_ready_0) // @[package.scala:16:47] state <= 4'h7; // @[NBDcache.scala:174:22] else if (state == 4'h7) // @[NBDcache.scala:174:22, :211:15] state <= 4'h8; // @[NBDcache.scala:174:22] else if (_T & ~_rpq_io_deq_valid) // @[NBDcache.scala:200:19, :203:49, :208:{31,34}] state <= 4'h0; // @[NBDcache.scala:174:22] if (_T_15 & is_hit_again) // @[Metadata.scala:105:27] new_coh_state <= dirtier_coh_state; // @[Metadata.scala:160:20] else if (_T_7) // @[NBDcache.scala:218:33] new_coh_state <= coh_on_grant_state; // @[Metadata.scala:160:20] end if (io_mem_grant_valid_0) // @[NBDcache.scala:150:7] r_counter <= _r_counter_T; // @[Edges.scala:229:27, :236:21] if (io_meta_write_ready_0 & io_meta_write_valid_0) // @[Decoupled.scala:51:35] meta_hazard <= 2'h1; // @[NBDcache.scala:278:28] else if (|meta_hazard) // @[NBDcache.scala:278:28, :279:21] meta_hazard <= _meta_hazard_T_1; // @[NBDcache.scala:278:28, :279:59] end if (_rpq_io_enq_valid_T) begin // @[NBDcache.scala:201:39] req_addr <= io_req_bits_addr_0; // @[NBDcache.scala:150:7, :176:16] req_tag <= io_req_bits_tag_0; // @[NBDcache.scala:150:7, :176:16] req_cmd <= io_req_bits_cmd_0; // @[NBDcache.scala:150:7, :176:16] req_size <= io_req_bits_size_0; // @[NBDcache.scala:150:7, :176:16] req_signed <= io_req_bits_signed_0; // @[NBDcache.scala:150:7, :176:16] req_dprv <= io_req_bits_dprv_0; // @[NBDcache.scala:150:7, :176:16] req_dv <= io_req_bits_dv_0; // @[NBDcache.scala:150:7, :176:16] req_phys <= io_req_bits_phys_0; // @[NBDcache.scala:150:7, :176:16] req_no_resp <= io_req_bits_no_resp_0; // @[NBDcache.scala:150:7, :176:16] req_no_alloc <= io_req_bits_no_alloc_0; // @[NBDcache.scala:150:7, :176:16] req_no_xcpt <= io_req_bits_no_xcpt_0; // @[NBDcache.scala:150:7, :176:16] req_sdq_id <= io_req_bits_sdq_id_0; // @[NBDcache.scala:150:7, :176:16] req_tag_match <= io_req_bits_tag_match_0; // @[NBDcache.scala:150:7, :176:16] req_old_meta_coh_state <= io_req_bits_old_meta_coh_state_0; // @[NBDcache.scala:150:7, :176:16] req_old_meta_tag <= io_req_bits_old_meta_tag_0; // @[NBDcache.scala:150:7, :176:16] req_way_en <= io_req_bits_way_en_0; // @[NBDcache.scala:150:7, :176:16] end else if (_T_15) // @[NBDcache.scala:234:24] req_cmd <= dirtier_cmd; // @[Metadata.scala:109:27] acked <= ~_rpq_io_enq_valid_T & (io_mem_grant_valid_0 | acked); // @[NBDcache.scala:150:7, :201:39, :205:18, :206:{29,37}, :243:43, :245:11] always @(posedge) Queue16_ReplayInternal_1 rpq ( // @[NBDcache.scala:200:19] .clock (clock), .reset (reset), .io_enq_ready (_rpq_io_enq_ready), .io_enq_valid (_rpq_io_enq_valid_T_7), // @[NBDcache.scala:201:87] .io_enq_bits_addr (io_req_bits_addr_0), // @[NBDcache.scala:150:7] .io_enq_bits_tag (io_req_bits_tag_0), // @[NBDcache.scala:150:7] .io_enq_bits_cmd (io_req_bits_cmd_0), // @[NBDcache.scala:150:7] .io_enq_bits_size (io_req_bits_size_0), 
// @[NBDcache.scala:150:7] .io_enq_bits_signed (io_req_bits_signed_0), // @[NBDcache.scala:150:7] .io_enq_bits_dprv (io_req_bits_dprv_0), // @[NBDcache.scala:150:7] .io_enq_bits_dv (io_req_bits_dv_0), // @[NBDcache.scala:150:7] .io_enq_bits_phys (io_req_bits_phys_0), // @[NBDcache.scala:150:7] .io_enq_bits_no_resp (io_req_bits_no_resp_0), // @[NBDcache.scala:150:7] .io_enq_bits_no_alloc (io_req_bits_no_alloc_0), // @[NBDcache.scala:150:7] .io_enq_bits_no_xcpt (io_req_bits_no_xcpt_0), // @[NBDcache.scala:150:7] .io_enq_bits_sdq_id (io_req_bits_sdq_id_0), // @[NBDcache.scala:150:7] .io_deq_ready (io_meta_read_ready_0 & _rpq_io_deq_ready_T_3), // @[NBDcache.scala:150:7, :203:{20,66}, :315:30, :316:22] .io_deq_valid (_rpq_io_deq_valid), .io_deq_bits_addr (_rpq_io_deq_bits_addr), .io_deq_bits_tag (io_replay_bits_tag_0), .io_deq_bits_cmd (_rpq_io_deq_bits_cmd), .io_deq_bits_size (io_replay_bits_size_0), .io_deq_bits_signed (io_replay_bits_signed_0), .io_deq_bits_dprv (io_replay_bits_dprv_0), .io_deq_bits_dv (io_replay_bits_dv_0), .io_deq_bits_no_resp (io_replay_bits_no_resp_0), .io_deq_bits_no_alloc (io_replay_bits_no_alloc_0), .io_deq_bits_no_xcpt (io_replay_bits_no_xcpt_0), .io_deq_bits_sdq_id (io_replay_bits_sdq_id_0) ); // @[NBDcache.scala:200:19] Queue1_TLBundleE_a32d64s2k3z4c_1 grantackq ( // @[NBDcache.scala:263:25] .clock (clock), .reset (reset), .io_enq_ready (_grantackq_io_enq_ready), .io_enq_valid (_grantackq_io_enq_valid_T_4), // @[NBDcache.scala:265:41] .io_enq_bits_sink (grantackq_io_enq_bits_e_sink), // @[Edges.scala:451:17] .io_deq_ready (_grantackq_io_deq_ready_T), // @[NBDcache.scala:269:49] .io_deq_valid (_grantackq_io_deq_valid), .io_deq_bits_sink (io_mem_finish_bits_sink_0) ); // @[NBDcache.scala:263:25] assign io_req_pri_rdy = io_req_pri_rdy_0; // @[NBDcache.scala:150:7] assign io_req_sec_rdy = io_req_sec_rdy_0; // @[NBDcache.scala:150:7] assign io_idx_match = io_idx_match_0; // @[NBDcache.scala:150:7] assign io_tag = io_tag_0; // @[NBDcache.scala:150:7] assign io_mem_acquire_valid = io_mem_acquire_valid_0; // @[NBDcache.scala:150:7] assign io_mem_acquire_bits_param = io_mem_acquire_bits_param_0; // @[NBDcache.scala:150:7] assign io_mem_acquire_bits_address = io_mem_acquire_bits_address_0; // @[NBDcache.scala:150:7] assign io_mem_finish_valid = io_mem_finish_valid_0; // @[NBDcache.scala:150:7] assign io_mem_finish_bits_sink = io_mem_finish_bits_sink_0; // @[NBDcache.scala:150:7] assign io_refill_way_en = io_refill_way_en_0; // @[NBDcache.scala:150:7] assign io_refill_addr = io_refill_addr_0; // @[NBDcache.scala:150:7] assign io_meta_read_valid = io_meta_read_valid_0; // @[NBDcache.scala:150:7] assign io_meta_read_bits_idx = io_meta_read_bits_idx_0; // @[NBDcache.scala:150:7] assign io_meta_read_bits_tag = io_meta_read_bits_tag_0; // @[NBDcache.scala:150:7] assign io_meta_write_valid = io_meta_write_valid_0; // @[NBDcache.scala:150:7] assign io_meta_write_bits_idx = io_meta_write_bits_idx_0; // @[NBDcache.scala:150:7] assign io_meta_write_bits_way_en = io_meta_write_bits_way_en_0; // @[NBDcache.scala:150:7] assign io_meta_write_bits_tag = io_meta_write_bits_tag_0; // @[NBDcache.scala:150:7] assign io_meta_write_bits_data_coh_state = io_meta_write_bits_data_coh_state_0; // @[NBDcache.scala:150:7] assign io_meta_write_bits_data_tag = io_meta_write_bits_data_tag_0; // @[NBDcache.scala:150:7] assign io_replay_valid = io_replay_valid_0; // @[NBDcache.scala:150:7] assign io_replay_bits_addr = io_replay_bits_addr_0; // @[NBDcache.scala:150:7] assign io_replay_bits_tag = 
io_replay_bits_tag_0; // @[NBDcache.scala:150:7] assign io_replay_bits_cmd = io_replay_bits_cmd_0; // @[NBDcache.scala:150:7] assign io_replay_bits_size = io_replay_bits_size_0; // @[NBDcache.scala:150:7] assign io_replay_bits_signed = io_replay_bits_signed_0; // @[NBDcache.scala:150:7] assign io_replay_bits_dprv = io_replay_bits_dprv_0; // @[NBDcache.scala:150:7] assign io_replay_bits_dv = io_replay_bits_dv_0; // @[NBDcache.scala:150:7] assign io_replay_bits_no_resp = io_replay_bits_no_resp_0; // @[NBDcache.scala:150:7] assign io_replay_bits_no_alloc = io_replay_bits_no_alloc_0; // @[NBDcache.scala:150:7] assign io_replay_bits_no_xcpt = io_replay_bits_no_xcpt_0; // @[NBDcache.scala:150:7] assign io_replay_bits_sdq_id = io_replay_bits_sdq_id_0; // @[NBDcache.scala:150:7] assign io_wb_req_valid = io_wb_req_valid_0; // @[NBDcache.scala:150:7] assign io_wb_req_bits_tag = io_wb_req_bits_tag_0; // @[NBDcache.scala:150:7] assign io_wb_req_bits_idx = io_wb_req_bits_idx_0; // @[NBDcache.scala:150:7] assign io_wb_req_bits_param = io_wb_req_bits_param_0; // @[NBDcache.scala:150:7] assign io_wb_req_bits_way_en = io_wb_req_bits_way_en_0; // @[NBDcache.scala:150:7] assign io_probe_rdy = io_probe_rdy_0; // @[NBDcache.scala:150:7] endmodule
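The refill bookkeeping in the module above (the r_beats1_decode, r_counter, and refill_done wires annotated with Edges.scala) is the standard TileLink beat-counter idiom: the number of beats minus one is decoded from the transfer size, the counter reloads on the first accepted beat and decrements on every later one, and the done signal pulses with the last accepted beat. The Chisel sketch below mirrors that pattern in isolation; the module name, ports, and parameter values are illustrative only and are not taken from the generated code or from the rocket-chip Edges.scala implementation.

import chisel3._
import chisel3.util._

// Illustrative sketch of the beat-counter pattern used by the refill logic above.
// A burst of 2^size bytes on a beatBytes-wide bus takes 2^size / beatBytes beats;
// `done` is asserted on the cycle the final beat is accepted.
class BeatCounter(beatBytes: Int = 8, maxLgSize: Int = 12) extends Module {
  private val counterBits = maxLgSize - log2Ceil(beatBytes)
  val io = IO(new Bundle {
    val size    = Input(UInt(log2Ceil(maxLgSize + 1).W)) // log2 of the burst size in bytes
    val hasData = Input(Bool())                          // only messages carrying data are multibeat
    val fire    = Input(Bool())                          // a beat is accepted this cycle
    val done    = Output(Bool())                         // high with the last accepted beat
  })

  // beats1 = (number of beats) - 1; forced to zero for single-beat messages.
  val rawBeats1 = ((1.U << io.size) - 1.U) >> log2Ceil(beatBytes)
  val beats1    = Mux(io.hasData, rawBeats1(counterBits - 1, 0), 0.U)

  val counter  = RegInit(0.U(counterBits.W))
  val counter1 = counter - 1.U
  val first    = counter === 0.U                         // no burst currently in flight
  val last     = counter === 1.U || beats1 === 0.U

  io.done := io.fire && last
  when (io.fire) {
    counter := Mux(first, beats1, counter1)              // reload on the first beat, else count down
  }
}

In the generated module the same roles are played by r_beats1, r_counter, and refill_done, with io_mem_grant_valid standing in for fire on the grant channel.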
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
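    // Each opcode-specific block below follows the same pattern: first check that the
    // diplomatic parameters allow this message type between the emitting master and the
    // addressed slave, then check the per-message field rules (source ID, address
    // alignment, minimum size, param encoding, mask, and corrupt).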
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } }
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } }
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed version of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message " + "is already pending (not received until current cycle) for a prior request message " + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the " + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the " + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be the same as a_size of the corresponding request " + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding " + "request message" + extra) }
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } }
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } }
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } // This is left in for almond, which doesn't adhere to the TileLink protocol @deprecated("Use
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
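// Note on the scoreboard updates here: `inflight` holds one bit per source ID, while `inflight_opcodes` and
// `inflight_sizes` pack a ((value << 1) | 1) field per source ID at bit offset (source << log_a_opcode_bus_size)
// or (source << log_a_size_bus_size); the low 1 marks the entry as valid, so an all-zero field means no request
// is outstanding for that source. Worked example, following directly from the code above: with
// a_opcode_bus_size = 4, a Get (opcode 4) from source 2 stores (4 << 1) | 1 = 0b1001 into bits [11:8] of
// inflight_opcodes; a_opcode_lookup later recovers the opcode by shifting that field right by one, and the
// matching AccessAckData clears the field via d_opcodes_clr when it fires on the 'D' channel.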
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Bundles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import freechips.rocketchip.util._ import scala.collection.immutable.ListMap import chisel3.util.Decoupled import chisel3.util.DecoupledIO import chisel3.reflect.DataMirror abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle // common combos in lazy policy: // Put + Acquire // Release + AccessAck object TLMessages { // A B C D E def PutFullData = 0.U // . . => AccessAck def PutPartialData = 1.U // . . => AccessAck def ArithmeticData = 2.U // . . => AccessAckData def LogicalData = 3.U // . . => AccessAckData def Get = 4.U // . . => AccessAckData def Hint = 5.U // . . => HintAck def AcquireBlock = 6.U // . => Grant[Data] def AcquirePerm = 7.U // . => Grant[Data] def Probe = 6.U // . => ProbeAck[Data] def AccessAck = 0.U // . . def AccessAckData = 1.U // . . def HintAck = 2.U // . . def ProbeAck = 4.U // . def ProbeAckData = 5.U // . def Release = 6.U // . => ReleaseAck def ReleaseData = 7.U // . => ReleaseAck def Grant = 4.U // . => GrantAck def GrantData = 5.U // . => GrantAck def ReleaseAck = 6.U // . def GrantAck = 0.U // . def isA(x: UInt) = x <= AcquirePerm def isB(x: UInt) = x <= Probe def isC(x: UInt) = x <= ReleaseData def isD(x: UInt) = x <= ReleaseAck def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant) def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck) def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("AcquireBlock",TLPermissions.PermMsgGrow), ("AcquirePerm",TLPermissions.PermMsgGrow)) def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("Probe",TLPermissions.PermMsgCap)) def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("ProbeAck",TLPermissions.PermMsgReport), ("ProbeAckData",TLPermissions.PermMsgReport), ("Release",TLPermissions.PermMsgReport), ("ReleaseData",TLPermissions.PermMsgReport)) def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("Grant",TLPermissions.PermMsgCap), ("GrantData",TLPermissions.PermMsgCap), ("ReleaseAck",TLPermissions.PermMsgReserved)) } /** * The three primary TileLink permissions are: * (T)runk: the agent is (or is on inwards path to) the global point of 
serialization. * (B)ranch: the agent is on an outwards path from the point of serialization and holds at most read-only permissions. * (N)one: the agent holds no permissions on the address. * These permissions are permuted by transfer operations in various ways. * Operations can cap permissions, request for them to be grown or shrunk, * or for a report on their current status. */ object TLPermissions { val aWidth = 2 val bdWidth = 2 val cWidth = 3 // Cap types (Grant = new permissions, Probe = permissions <= target) def toT = 0.U(bdWidth.W) def toB = 1.U(bdWidth.W) def toN = 2.U(bdWidth.W) def isCap(x: UInt) = x <= toN // Grow types (Acquire = permissions >= target) def NtoB = 0.U(aWidth.W) def NtoT = 1.U(aWidth.W) def BtoT = 2.U(aWidth.W) def isGrow(x: UInt) = x <= BtoT // Shrink types (ProbeAck, Release) def TtoB = 0.U(cWidth.W) def TtoN = 1.U(cWidth.W) def BtoN = 2.U(cWidth.W) def isShrink(x: UInt) = x <= BtoN // Report types (ProbeAck, Release) def TtoT = 3.U(cWidth.W) def BtoB = 4.U(cWidth.W) def NtoN = 5.U(cWidth.W) def isReport(x: UInt) = x <= NtoN def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT") def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN") def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN") def PermMsgReserved:Seq[String] = Seq("Reserved") } object TLAtomics { val width = 3 // Arithmetic types def MIN = 0.U(width.W) def MAX = 1.U(width.W) def MINU = 2.U(width.W) def MAXU = 3.U(width.W) def ADD = 4.U(width.W) def isArithmetic(x: UInt) = x <= ADD // Logical types def XOR = 0.U(width.W) def OR = 1.U(width.W) def AND = 2.U(width.W) def SWAP = 3.U(width.W) def isLogical(x: UInt) = x <= SWAP def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD") def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP") } object TLHints { val width = 1 def PREFETCH_READ = 0.U(width.W) def PREFETCH_WRITE = 1.U(width.W) def isHints(x: UInt) = x <= PREFETCH_WRITE def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite") } sealed trait TLChannel extends TLBundleBase { val channelName: String } sealed trait TLDataChannel extends TLChannel sealed trait TLAddrChannel extends TLDataChannel final class TLBundleA(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleA_${params.shortName}" val channelName = "'A' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleB(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleB_${params.shortName}" val channelName = "'B' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val address = UInt(params.addressBits.W) // from // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleC(params: TLBundleParameters) extends TLBundleBase(params) with
TLAddrChannel { override def typeName = s"TLBundleC_${params.shortName}" val channelName = "'C' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.cWidth.W) // shrink or report perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleD(params: TLBundleParameters) extends TLBundleBase(params) with TLDataChannel { override def typeName = s"TLBundleD_${params.shortName}" val channelName = "'D' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val sink = UInt(params.sinkBits.W) // from val denied = Bool() // implies corrupt iff *Data val user = BundleMap(params.responseFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleE(params: TLBundleParameters) extends TLBundleBase(params) with TLChannel { override def typeName = s"TLBundleE_${params.shortName}" val channelName = "'E' channel" val sink = UInt(params.sinkBits.W) // to } class TLBundle(val params: TLBundleParameters) extends Record { // Emulate a Bundle with elements abcde or ad depending on params.hasBCE private val optA = Some (Decoupled(new TLBundleA(params))) private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params)))) private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params))) private val optD = Some (Flipped(Decoupled(new TLBundleD(params)))) private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params))) def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params))))) def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params))))) def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params))))) def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params))))) def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params))))) val elements = if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a) else ListMap("d" -> d, "a" -> a) def tieoff(): Unit = { DataMirror.specifiedDirectionOf(a.ready) match { case SpecifiedDirection.Input => a.ready := false.B c.ready := false.B e.ready := false.B b.valid := false.B d.valid := false.B case SpecifiedDirection.Output => a.valid := false.B c.valid := false.B e.valid := false.B b.ready := false.B d.ready := false.B case _ => } } } object TLBundle { def apply(params: TLBundleParameters) = new TLBundle(params) } class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params) { val a = new AsyncBundle(new TLBundleA(params.base), params.async) val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async)) val c = new AsyncBundle(new TLBundleC(params.base), params.async) val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async)) val e = new AsyncBundle(new 
TLBundleE(params.base), params.async) } class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = RationalIO(new TLBundleA(params)) val b = Flipped(RationalIO(new TLBundleB(params))) val c = RationalIO(new TLBundleC(params)) val d = Flipped(RationalIO(new TLBundleD(params))) val e = RationalIO(new TLBundleE(params)) } class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = CreditedIO(new TLBundleA(params)) val b = Flipped(CreditedIO(new TLBundleB(params))) val c = CreditedIO(new TLBundleC(params)) val d = Flipped(CreditedIO(new TLBundleD(params))) val e = CreditedIO(new TLBundleE(params)) } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // An potentially empty inclusive range of 2-powers 
[min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max, s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask are the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f decribes a device managing 0x1000-0x100f, 0x1100-0x110f, ... 
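// Worked example of the containment check defined below: with base=0x1000 and mask=0xf0f,
// contains(0x1105) holds because ((0x1105 ^ 0x1000) & ~0xf0f) == (0x105 & ~0xf0f) == 0, while
// contains(0x1010) fails because ((0x1010 ^ 0x1000) & ~0xf0f) == 0x010 != 0 -- consistent with the
// 0x1000-0x100f, 0x1100-0x110f, ... ranges described above.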
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
      val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
      // Do there exist A messages without Data?
      val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
      // Statically optimize the case where hasData is a constant
      if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
    }
    case _:TLBundleB => {
      // Do there exist B messages with Data?
      val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
      // Do there exist B messages without Data?
      val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
      // Statically optimize the case where hasData is a constant
      if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
    }
    case _:TLBundleC => {
      // Do there exist C messages with Data?
      val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
      // Do there exist C messages without Data?
      val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
      if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
    }
    case _:TLBundleD => {
      // Do there exist D messages with Data?
      val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
      // Do there exist D messages without Data?
      val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
      if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
    }
    case _:TLBundleE => Some(false)
  }
}

def isRequest(x: TLChannel): Bool = {
  x match {
    case a: TLBundleA => true.B
    case b: TLBundleB => true.B
    case c: TLBundleC => c.opcode(2) && c.opcode(1)
    // opcode === TLMessages.Release ||
    // opcode === TLMessages.ReleaseData
    case d: TLBundleD => d.opcode(2) && !d.opcode(1)
    // opcode === TLMessages.Grant ||
    // opcode === TLMessages.GrantData
    case e: TLBundleE => false.B
  }
}

def isResponse(x: TLChannel): Bool = {
  x match {
    case a: TLBundleA => false.B
    case b: TLBundleB => false.B
    case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
    // opcode =/= TLMessages.Release &&
    // opcode =/= TLMessages.ReleaseData
    case d: TLBundleD => true.B // Grant isResponse + isRequest
    case e: TLBundleE => true.B
  }
}

def hasData(x: TLChannel): Bool = {
  val opdata = x match {
    case a: TLBundleA => !a.opcode(2)
    // opcode === TLMessages.PutFullData ||
    // opcode === TLMessages.PutPartialData ||
    // opcode === TLMessages.ArithmeticData ||
    // opcode === TLMessages.LogicalData
    case b: TLBundleB => !b.opcode(2)
    // opcode === TLMessages.PutFullData ||
    // opcode === TLMessages.PutPartialData ||
    // opcode === TLMessages.ArithmeticData ||
    // opcode === TLMessages.LogicalData
    case c: TLBundleC => c.opcode(0)
    // opcode === TLMessages.AccessAckData ||
    // opcode === TLMessages.ProbeAckData ||
    // opcode === TLMessages.ReleaseData
    case d: TLBundleD => d.opcode(0)
    // opcode === TLMessages.AccessAckData ||
    // opcode === TLMessages.GrantData
    case e: TLBundleE => false.B
  }
  staticHasData(x).map(_.B).getOrElse(opdata)
}

def opcode(x: TLDataChannel): UInt = {
  x match {
    case a: TLBundleA => a.opcode
    case b: TLBundleB => b.opcode
    case c: TLBundleC => c.opcode
    case d: TLBundleD => d.opcode
  }
}

def param(x: TLDataChannel): UInt = {
  x match {
    case a: TLBundleA => a.param
    case b: TLBundleB => b.param
    case c: TLBundleC => c.param
    case d: TLBundleD =>
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := 
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
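The edge-helper methods above are the usual way client logic builds channel-A requests and tracks multi-beat responses. As a brief illustration (a sketch, not part of the original sources), the fragment below assumes out: TLBundle and edge: TLEdgeOut were obtained from a diplomatic client node, e.g. val (out, edge) = node.out(0) inside a LazyModuleImp; wantRead, addr, and sourceId are hypothetical signals local to that client.

  // Hedged usage sketch of the TLEdgeOut helpers defined above.
  // wantRead, addr, and sourceId are hypothetical client-local signals.
  val (getLegal, getBits) = edge.Get(fromSource = sourceId, toAddress = addr, lgSize = 3.U)
  out.a.valid := wantRead && getLegal // only raise valid if a visible manager supports this Get
  out.a.bits  := getBits
  out.d.ready := true.B               // sink the AccessAckData beats unconditionally
  // firstlast wraps firstlastHelper: d_done pulses on the final beat of each D-channel burst.
  val (d_first, d_last, d_done) = edge.firstlast(out.d)

Note that getLegal comes from manager.supportsGetFast(toAddress, lgSize), so it is a per-address, per-size hardware check; the require() inside Get only verifies at elaboration time that at least one visible manager supports Gets at all.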
module TLMonitor_42( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [4:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [4:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input io_in_d_bits_sink, // @[Monitor.scala:20:14] input io_in_d_bits_denied, // @[Monitor.scala:20:14] input [63:0] io_in_d_bits_data, // @[Monitor.scala:20:14] input io_in_d_bits_corrupt // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7] wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7] wire [4:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7] wire [31:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7] wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7] wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7] wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7] wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7] wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7] wire [4:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7] wire io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7] wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7] wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7] wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7] wire _source_ok_T = 1'h0; // @[Parameters.scala:54:10] wire _source_ok_T_6 = 1'h0; // @[Parameters.scala:54:10] wire sink_ok = 1'h0; // @[Monitor.scala:309:31] wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_bits_corrupt = 1'h0; 
// @[Bundles.scala:265:74] wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35] wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36] wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25] wire c_first_done = 1'h0; // @[Edges.scala:233:22] wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47] wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire 
_c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95] wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71] wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44] wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36] wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51] wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40] wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55] wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88] wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59] wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14] wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27] wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25] wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21] wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] 
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_param 
= 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61] wire _source_ok_T_1 = 1'h1; // @[Parameters.scala:54:32] wire _source_ok_T_2 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:54:67] wire _source_ok_T_7 = 1'h1; // @[Parameters.scala:54:32] wire _source_ok_T_8 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:54:67] wire c_first = 1'h1; // @[Edges.scala:231:25] wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire c_first_last = 1'h1; // @[Edges.scala:232:33] wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28] wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28] wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_1_bits_data = 
64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [31:0] _c_first_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_first_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_first_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_first_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_set_wo_ready_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_set_wo_ready_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_opcodes_set_interm_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_opcodes_set_interm_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_sizes_set_interm_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_sizes_set_interm_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_opcodes_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_opcodes_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_sizes_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_sizes_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_probe_ack_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_probe_ack_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_probe_ack_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_probe_ack_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _same_cycle_resp_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _same_cycle_resp_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _same_cycle_resp_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _same_cycle_resp_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _same_cycle_resp_WIRE_4_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _same_cycle_resp_WIRE_5_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [4:0] _c_first_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74] wire [4:0] _c_first_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61] wire [4:0] _c_first_WIRE_2_bits_source = 5'h0; // @[Bundles.scala:265:74] wire [4:0] _c_first_WIRE_3_bits_source = 5'h0; // @[Bundles.scala:265:61] wire [4:0] _c_set_wo_ready_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74] wire [4:0] _c_set_wo_ready_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61] wire [4:0] _c_set_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74] wire [4:0] _c_set_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61] wire [4:0] _c_opcodes_set_interm_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74] wire [4:0] _c_opcodes_set_interm_WIRE_1_bits_source = 5'h0; // 
@[Bundles.scala:265:61] wire [4:0] _c_sizes_set_interm_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74] wire [4:0] _c_sizes_set_interm_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61] wire [4:0] _c_opcodes_set_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74] wire [4:0] _c_opcodes_set_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61] wire [4:0] _c_sizes_set_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74] wire [4:0] _c_sizes_set_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61] wire [4:0] _c_probe_ack_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74] wire [4:0] _c_probe_ack_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61] wire [4:0] _c_probe_ack_WIRE_2_bits_source = 5'h0; // @[Bundles.scala:265:74] wire [4:0] _c_probe_ack_WIRE_3_bits_source = 5'h0; // @[Bundles.scala:265:61] wire [4:0] _same_cycle_resp_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74] wire [4:0] _same_cycle_resp_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61] wire [4:0] _same_cycle_resp_WIRE_2_bits_source = 5'h0; // @[Bundles.scala:265:74] wire [4:0] _same_cycle_resp_WIRE_3_bits_source = 5'h0; // @[Bundles.scala:265:61] wire [4:0] _same_cycle_resp_WIRE_4_bits_source = 5'h0; // @[Bundles.scala:265:74] wire [4:0] _same_cycle_resp_WIRE_5_bits_source = 5'h0; // @[Bundles.scala:265:61] wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [258:0] _c_opcodes_set_T_1 = 259'h0; // @[Monitor.scala:767:54] wire [258:0] _c_sizes_set_T_1 = 259'h0; // @[Monitor.scala:768:52] wire [7:0] _c_opcodes_set_T = 8'h0; // @[Monitor.scala:767:79] wire [7:0] _c_sizes_set_T = 8'h0; // @[Monitor.scala:768:77] wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61] wire [3:0] _c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59] wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40] wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40] wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53] wire [3:0] 
_c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51] wire [31:0] _c_set_wo_ready_T = 32'h1; // @[OneHot.scala:58:35] wire [31:0] _c_set_T = 32'h1; // @[OneHot.scala:58:35] wire [79:0] c_opcodes_set = 80'h0; // @[Monitor.scala:740:34] wire [79:0] c_sizes_set = 80'h0; // @[Monitor.scala:741:34] wire [19:0] c_set = 20'h0; // @[Monitor.scala:738:34] wire [19:0] c_set_wo_ready = 20'h0; // @[Monitor.scala:739:34] wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46] wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76] wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71] wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42] wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42] wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42] wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123] wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117] wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48] wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48] wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123] wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119] wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48] wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48] wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34] wire [4:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [4:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [4:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [4:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [4:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [4:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [4:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [4:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [4:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [4:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [4:0] _source_ok_uncommonBits_T_1 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [4:0] source_ok_uncommonBits = _source_ok_uncommonBits_T; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_4 = source_ok_uncommonBits < 5'h14; // @[Parameters.scala:52:56, :57:20] wire _source_ok_T_5 = _source_ok_T_4; // @[Parameters.scala:56:48, :57:20] wire _source_ok_WIRE_0 = _source_ok_T_5; // @[Parameters.scala:1138:31] wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71] wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71] assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71] wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71] assign _a_first_beats1_decode_T = _GEN; // 
@[package.scala:243:71] wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71] assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71] wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}] wire [31:0] _is_aligned_T = {26'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46] wire is_aligned = _is_aligned_T == 32'h0; // @[Edges.scala:21:{16,24}] wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49] wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}] wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27] wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21] wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26] wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26] wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}] wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}] wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26] wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26] wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}] wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26] wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26] wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20] wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}] wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_1 = 
mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}] wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}] wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}] wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}] wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10] wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10] wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10] wire [4:0] uncommonBits = _uncommonBits_T; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_1 = _uncommonBits_T_1; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_2 = _uncommonBits_T_2; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_3 = _uncommonBits_T_3; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_4 = _uncommonBits_T_4; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_5 = _uncommonBits_T_5; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_6 = _uncommonBits_T_6; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_7 = _uncommonBits_T_7; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_8 = _uncommonBits_T_8; // @[Parameters.scala:52:{29,56}] wire [4:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_10 = source_ok_uncommonBits_1 < 5'h14; // @[Parameters.scala:52:56, :57:20] wire _source_ok_T_11 = _source_ok_T_10; // @[Parameters.scala:56:48, :57:20] wire _source_ok_WIRE_1_0 = _source_ok_T_11; // @[Parameters.scala:1138:31] wire _T_732 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35] wire _a_first_T; // @[Decoupled.scala:51:35] assign _a_first_T = _T_732; // @[Decoupled.scala:51:35] wire _a_first_T_1; // @[Decoupled.scala:51:35] assign _a_first_T_1 = _T_732; // @[Decoupled.scala:51:35] wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // 
@[package.scala:243:{71,76}] wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46] wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}] wire [2:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [2:0] a_first_counter; // @[Edges.scala:229:27] wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28] wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35] wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27] wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [2:0] size; // @[Monitor.scala:389:22] reg [4:0] source; // @[Monitor.scala:390:22] reg [31:0] address; // @[Monitor.scala:391:22] wire _T_805 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35] wire _d_first_T; // @[Decoupled.scala:51:35] assign _d_first_T = _T_805; // @[Decoupled.scala:51:35] wire _d_first_T_1; // @[Decoupled.scala:51:35] assign _d_first_T_1 = _T_805; // @[Decoupled.scala:51:35] wire _d_first_T_2; // @[Decoupled.scala:51:35] assign _d_first_T_2 = _T_805; // @[Decoupled.scala:51:35] wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71] assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71] assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71] assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71] wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46] wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire [2:0] d_first_beats1 = d_first_beats1_opdata ? 
d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28] wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] param_1; // @[Monitor.scala:539:22] reg [2:0] size_1; // @[Monitor.scala:540:22] reg [4:0] source_1; // @[Monitor.scala:541:22] reg sink; // @[Monitor.scala:542:22] reg denied; // @[Monitor.scala:543:22] reg [19:0] inflight; // @[Monitor.scala:614:27] reg [79:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [79:0] inflight_sizes; // @[Monitor.scala:618:33] wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46] wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}] wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [2:0] a_first_counter_1; // @[Edges.scala:229:27] wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28] wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46] wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? 
d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter_1; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28] wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [19:0] a_set; // @[Monitor.scala:626:34] wire [19:0] a_set_wo_ready; // @[Monitor.scala:627:34] wire [79:0] a_opcodes_set; // @[Monitor.scala:630:33] wire [79:0] a_sizes_set; // @[Monitor.scala:632:31] wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35] wire [7:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69] wire [7:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69] assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69] wire [7:0] _a_size_lookup_T; // @[Monitor.scala:641:65] assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65] wire [7:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101] assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101] wire [7:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99] assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99] wire [7:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69] assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69] wire [7:0] _c_size_lookup_T; // @[Monitor.scala:750:67] assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67] wire [7:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101] assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101] wire [7:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99] assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99] wire [79:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}] wire [79:0] _a_opcode_lookup_T_6 = {76'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}] wire [79:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[79:1]}; // @[Monitor.scala:637:{97,152}] assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}] wire [3:0] a_size_lookup; // @[Monitor.scala:639:33] wire [79:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}] wire [79:0] _a_size_lookup_T_6 = {76'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}] wire [79:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[79:1]}; // @[Monitor.scala:641:{91,144}] assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}] wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40] wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38] wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44] wire [31:0] _GEN_2 = 
32'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35] wire [31:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35] assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35] wire [31:0] _a_set_T; // @[OneHot.scala:58:35] assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35] assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[19:0] : 20'h0; // @[OneHot.scala:58:35] wire _T_658 = _T_732 & a_first_1; // @[Decoupled.scala:51:35] assign a_set = _T_658 ? _a_set_T[19:0] : 20'h0; // @[OneHot.scala:58:35] wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53] wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}] assign a_opcodes_set_interm = _T_658 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}] wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51] wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}] assign a_sizes_set_interm = _T_658 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}] wire [7:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79] wire [7:0] _a_opcodes_set_T; // @[Monitor.scala:659:79] assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79] wire [7:0] _a_sizes_set_T; // @[Monitor.scala:660:77] assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77] wire [258:0] _a_opcodes_set_T_1 = {255'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}] assign a_opcodes_set = _T_658 ? _a_opcodes_set_T_1[79:0] : 80'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}] wire [258:0] _a_sizes_set_T_1 = {255'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}] assign a_sizes_set = _T_658 ? _a_sizes_set_T_1[79:0] : 80'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}] wire [19:0] d_clr; // @[Monitor.scala:664:34] wire [19:0] d_clr_wo_ready; // @[Monitor.scala:665:34] wire [79:0] d_opcodes_clr; // @[Monitor.scala:668:33] wire [79:0] d_sizes_clr; // @[Monitor.scala:670:31] wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46] wire d_release_ack; // @[Monitor.scala:673:46] assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46] wire d_release_ack_1; // @[Monitor.scala:783:46] assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46] wire _T_704 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26] wire [31:0] _GEN_5 = 32'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35] wire [31:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35] wire [31:0] _d_clr_T; // @[OneHot.scala:58:35] assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35] wire [31:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35] wire [31:0] _d_clr_T_1; // @[OneHot.scala:58:35] assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35] assign d_clr_wo_ready = _T_704 & ~d_release_ack ? _d_clr_wo_ready_T[19:0] : 20'h0; // @[OneHot.scala:58:35] wire _T_673 = _T_805 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35] assign d_clr = _T_673 ? _d_clr_T[19:0] : 20'h0; // @[OneHot.scala:58:35] wire [270:0] _d_opcodes_clr_T_5 = 271'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}] assign d_opcodes_clr = _T_673 ? 
_d_opcodes_clr_T_5[79:0] : 80'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}] wire [270:0] _d_sizes_clr_T_5 = 271'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}] assign d_sizes_clr = _T_673 ? _d_sizes_clr_T_5[79:0] : 80'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}] wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}] wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113] wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}] wire [19:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27] wire [19:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38] wire [19:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}] wire [79:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43] wire [79:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62] wire [79:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}] wire [79:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39] wire [79:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56] wire [79:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}] reg [31:0] watchdog; // @[Monitor.scala:709:27] wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26] wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26] reg [19:0] inflight_1; // @[Monitor.scala:726:35] wire [19:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35] reg [79:0] inflight_opcodes_1; // @[Monitor.scala:727:35] wire [79:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43] reg [79:0] inflight_sizes_1; // @[Monitor.scala:728:35] wire [79:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41] wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46] wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter_2; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28] wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}] wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T_2 = d_first_2 ? 
d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35] wire [3:0] c_size_lookup; // @[Monitor.scala:748:35] wire [79:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}] wire [79:0] _c_opcode_lookup_T_6 = {76'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}] wire [79:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[79:1]}; // @[Monitor.scala:749:{97,152}] assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}] wire [79:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}] wire [79:0] _c_size_lookup_T_6 = {76'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}] wire [79:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[79:1]}; // @[Monitor.scala:750:{93,146}] assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}] wire [19:0] d_clr_1; // @[Monitor.scala:774:34] wire [19:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34] wire [79:0] d_opcodes_clr_1; // @[Monitor.scala:776:34] wire [79:0] d_sizes_clr_1; // @[Monitor.scala:777:34] wire _T_776 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26] assign d_clr_wo_ready_1 = _T_776 & d_release_ack_1 ? _d_clr_wo_ready_T_1[19:0] : 20'h0; // @[OneHot.scala:58:35] wire _T_758 = _T_805 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35] assign d_clr_1 = _T_758 ? _d_clr_T_1[19:0] : 20'h0; // @[OneHot.scala:58:35] wire [270:0] _d_opcodes_clr_T_11 = 271'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}] assign d_opcodes_clr_1 = _T_758 ? _d_opcodes_clr_T_11[79:0] : 80'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}] wire [270:0] _d_sizes_clr_T_11 = 271'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}] assign d_sizes_clr_1 = _T_758 ? _d_sizes_clr_T_11[79:0] : 80'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}] wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 5'h0; // @[Monitor.scala:36:7, :795:113] wire [19:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46] wire [19:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}] wire [79:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62] wire [79:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}] wire [79:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58] wire [79:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
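The monitor logic above tracks which A-channel source IDs still have a D-channel response outstanding: on each accepted request it ORs a one-hot set vector (a_set) into an inflight register, on each accepted response it ANDs out a one-hot clear vector (d_clr), and it keeps per-source opcode/size slots packed into the wide inflight_opcodes/inflight_sizes registers, indexed by source << 2. The standalone Chisel sketch below illustrates only that set/clear idiom; the module name, ports, and parameters are invented for the example (it is not one of the files in this document), and the 20-entry, 4-bit-slot widths simply mirror this instance.

import chisel3._
import chisel3.util._

// Illustrative only: a simplified stand-in for the inflight bookkeeping that
// TLMonitor's legalizeADSource elaborates into the Verilog above.
class InflightTrackerSketch(nSources: Int = 20) extends Module {
  private val slotBits = 4 // 3-bit opcode plus one "occupied" encoding bit
  val io = IO(new Bundle {
    val a_fire          = Input(Bool())
    val a_source        = Input(UInt(log2Ceil(nSources).W))
    val a_opcode        = Input(UInt(3.W))
    val d_fire          = Input(Bool())
    val d_source        = Input(UInt(log2Ceil(nSources).W))
    val inflight        = Output(UInt(nSources.W))
    val d_opcode_lookup = Output(UInt(3.W))
  })

  val inflight        = RegInit(0.U(nSources.W))
  val inflightOpcodes = RegInit(0.U((nSources * slotBits).W))

  // One-hot set/clear vectors, analogous to a_set and d_clr above.
  val aSet = Mux(io.a_fire, UIntToOH(io.a_source, nSources), 0.U)
  val dClr = Mux(io.d_fire, UIntToOH(io.d_source, nSources), 0.U)

  // The opcode slot is stored as {opcode, 1'b1}, so an all-zero slot means "empty".
  val aSlot      = Mux(io.a_fire, Cat(io.a_opcode, 1.U(1.W)), 0.U(slotBits.W))
  val opcodesSet = (aSlot << (io.a_source * slotBits.U))(nSources * slotBits - 1, 0)
  val slotMask   = ((1 << slotBits) - 1).U(slotBits.W)
  val opcodesClr = Mux(io.d_fire,
    (slotMask << (io.d_source * slotBits.U))(nSources * slotBits - 1, 0),
    0.U)

  inflight        := (inflight | aSet) & ~dClr
  inflightOpcodes := (inflightOpcodes | opcodesSet) & ~opcodesClr

  io.inflight := inflight
  // Drop the encoding bit to recover the opcode recorded for the responding source.
  io.d_opcode_lookup := (inflightOpcodes >> (io.d_source * slotBits.U))(slotBits - 1, 1)
}

Storing each slot shifted left by one with the low bit set means an all-zero slot unambiguously reads as "nothing outstanding" for that source; this is the same +1 encoding described in the legalizeADSource comments of Monitor.scala below.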
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
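    // Hypothetical sketch only (this check is not in the file): an AcquireT-specific
    // check could additionally look at the grow params that request T permissions,
    //   val wants_t = bundle.param === TLPermissions.NtoT || bundle.param === TLPermissions.BtoT
    // and assert that the addressed manager can actually grant T for such requests;
    // the exact manager-capability query to use is left unspecified here.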
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
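  // Added commentary (not part of the original source): the register update above, together with the
  // two updates that follow, implements a per-source scoreboard. `a_set` is a one-hot of the A-channel
  // source ID, driven only on the first beat of an accepted request; `d_clr` is the matching one-hot
  // driven on the first beat of the accepted response (ReleaseAck is excluded because it answers the
  // C channel, not A). A minimal standalone sketch of the same set/clear idiom, with hypothetical names:
  //   val busy    = RegInit(0.U(nSources.W))
  //   val setMask = Mux(reqFire  && reqFirstBeat,  UIntToOH(reqSource),  0.U)
  //   val clrMask = Mux(respFire && respFirstBeat, UIntToOH(respSource), 0.U)
  //   busy := (busy | setMask) & ~clrMask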
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
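  // Added commentary (not part of the original source): the body below builds a logarithmic barrel
  // rotator. Each bit of the (zero-padded) rotate amount selects whether to apply a fixed rotation of
  // 2^i positions, so log2Ceil(width) muxes cover every possible amount. For example, rotating an
  // 8-bit value right by n = 5 (binary 101) applies the 1-position stage and the 4-position stage.
  // An unrolled sketch of the same idea for width 8 (assuming n is at least 3 bits wide):
  //   val r0 = Mux(n(0), x.rotateRight(1), x)
  //   val r1 = Mux(n(1), r0.rotateRight(2), r0)
  //   val r2 = Mux(n(2), r1.rotateRight(4), r1)   // r2 is x rotated right by n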
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // An potentially empty inclusive range of 2-powers [min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max, 
s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask are the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f decribes a device managing 0x1000-0x100f, 0x1100-0x110f, ... 
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
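    // Added note (not part of the original source): gather every bit position that appears in any
    // base address; the single-bit unify overload above is then folded over those bits, so sets
    // that differ only in one such bit are paired and their masks widened by that bit.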
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
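      // Added commentary (not part of the original source): staticHasData lets hasData() below fold to
      // a compile-time constant when the negotiated port parameters rule out one of the two cases;
      // the caller uses it as
      //   staticHasData(x).map(_.B).getOrElse(opdata)
      // so Some(true)/Some(false) becomes a literal and None falls back to decoding the opcode.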
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => 
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := 
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
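As a point of reference (not part of the Chisel sources above), a client implementation typically drives a TileLink port through the TLEdgeOut helpers defined in this file. The sketch below is hypothetical and assumes a negotiated bundle/edge pair is already in scope, for example via val (out, edge) = node.out(0) inside a LazyModuleImp; only calls defined in Edges.scala above (Get, firstlast) are used.

// Illustrative sketch only: `out: TLBundle` and `edge: TLEdgeOut` are assumed to be in scope.
// Build the (legality, bits) pair for an 8-byte Get at a fixed address.
val (legal, getBits) = edge.Get(fromSource = 0.U, toAddress = 0x1000.U, lgSize = 3.U)
out.a.valid := legal          // only issue the request if some manager supports the access
out.a.bits  := getBits
out.d.ready := true.B         // unconditionally accept responses in this sketch
// Use the edge's beat-tracking helper to find message boundaries on the D channel.
val (d_first, d_last, d_done) = edge.firstlast(out.d)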
module TLMonitor_2( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [6:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [28:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [6:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input io_in_d_bits_denied, // @[Monitor.scala:20:14] input io_in_d_bits_corrupt // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire [12:0] _GEN = {10'h0, io_in_a_bits_size}; // @[package.scala:243:71] wire _a_first_T_1 = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35] reg [2:0] a_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [2:0] size; // @[Monitor.scala:389:22] reg [6:0] source; // @[Monitor.scala:390:22] reg [28:0] address; // @[Monitor.scala:391:22] reg [2:0] d_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [2:0] size_1; // @[Monitor.scala:540:22] reg [6:0] source_1; // @[Monitor.scala:541:22] reg denied; // @[Monitor.scala:543:22] reg [64:0] inflight; // @[Monitor.scala:614:27] reg [259:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [259:0] inflight_sizes; // @[Monitor.scala:618:33] reg [2:0] a_first_counter_1; // @[Edges.scala:229:27] wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] reg [2:0] d_first_counter_1; // @[Edges.scala:229:27] wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _GEN_0 = _a_first_T_1 & a_first_1; // @[Decoupled.scala:51:35] wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46] wire _GEN_1 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74] reg [31:0] watchdog; // @[Monitor.scala:709:27] reg [64:0] inflight_1; // @[Monitor.scala:726:35] reg [259:0] inflight_sizes_1; // @[Monitor.scala:728:35] reg [2:0] d_first_counter_2; // @[Edges.scala:229:27] wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
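(Aside: the a_first_counter / d_first_counter and inflight registers in the monitor above are elaborated from TLEdge.firstlastHelper and TLEdge.inFlight in the Edges.scala source earlier in this prompt. A simplified, hypothetical rendering of the beat-tracking pattern behind those counters, with beats1 and fire assumed to be in scope:)

// Simplified sketch of the pattern that elaborates into a_first_counter / d_first_counter.
// The 3-bit width corresponds to log2Up(maxTransfer / beatBytes) for this particular edge.
val counter  = RegInit(0.U(3.W))
val counter1 = counter - 1.U
val first    = counter === 0.U                   // start of a new TileLink message
val last     = counter === 1.U || beats1 === 0.U
when (fire) { counter := Mux(first, beats1, counter1) }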
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File Crossing.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.interrupts import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.util.{SynchronizerShiftReg, AsyncResetReg} @deprecated("IntXing does not ensure interrupt source is glitch free. 
Use IntSyncSource and IntSyncSink", "rocket-chip 1.2") class IntXing(sync: Int = 3)(implicit p: Parameters) extends LazyModule { val intnode = IntAdapterNode() lazy val module = new Impl class Impl extends LazyModuleImp(this) { (intnode.in zip intnode.out) foreach { case ((in, _), (out, _)) => out := SynchronizerShiftReg(in, sync) } } } object IntSyncCrossingSource { def apply(alreadyRegistered: Boolean = false)(implicit p: Parameters) = { val intsource = LazyModule(new IntSyncCrossingSource(alreadyRegistered)) intsource.node } } class IntSyncCrossingSource(alreadyRegistered: Boolean = false)(implicit p: Parameters) extends LazyModule { val node = IntSyncSourceNode(alreadyRegistered) lazy val module = if (alreadyRegistered) (new ImplRegistered) else (new Impl) class Impl extends LazyModuleImp(this) { def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0) override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out.sync := AsyncResetReg(Cat(in.reverse)).asBools } } class ImplRegistered extends LazyRawModuleImp(this) { def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0) override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}_Registered" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out.sync := in } } } object IntSyncCrossingSink { @deprecated("IntSyncCrossingSink which used the `sync` parameter to determine crossing type is deprecated. Use IntSyncAsyncCrossingSink, IntSyncRationalCrossingSink, or IntSyncSyncCrossingSink instead for > 1, 1, and 0 sync values respectively", "rocket-chip 1.2") def apply(sync: Int = 3)(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync)) intsink.node } } class IntSyncAsyncCrossingSink(sync: Int = 3)(implicit p: Parameters) extends LazyModule { val node = IntSyncSinkNode(sync) lazy val module = new Impl class Impl extends LazyModuleImp(this) { override def desiredName = s"IntSyncAsyncCrossingSink_n${node.out.size}x${node.out.head._1.size}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out := SynchronizerShiftReg(in.sync, sync) } } } object IntSyncAsyncCrossingSink { def apply(sync: Int = 3)(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync)) intsink.node } } class IntSyncSyncCrossingSink()(implicit p: Parameters) extends LazyModule { val node = IntSyncSinkNode(0) lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { def outSize = node.out.headOption.map(_._1.size).getOrElse(0) override def desiredName = s"IntSyncSyncCrossingSink_n${node.out.size}x${outSize}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out := in.sync } } } object IntSyncSyncCrossingSink { def apply()(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncSyncCrossingSink()) intsink.node } } class IntSyncRationalCrossingSink()(implicit p: Parameters) extends LazyModule { val node = IntSyncSinkNode(1) lazy val module = new Impl class Impl extends LazyModuleImp(this) { def outSize = node.out.headOption.map(_._1.size).getOrElse(0) override def desiredName = s"IntSyncRationalCrossingSink_n${node.out.size}x${outSize}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out := RegNext(in.sync) } } } object IntSyncRationalCrossingSink { def apply()(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncRationalCrossingSink()) intsink.node } } File 
LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. 
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. */ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File MixedNode.scala: package org.chipsalliance.diplomacy.nodes import chisel3.{Data, DontCare, Wire} import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.{Field, Parameters} import org.chipsalliance.diplomacy.ValName import org.chipsalliance.diplomacy.sourceLine /** One side metadata of a [[Dangle]]. * * Describes one side of an edge going into or out of a [[BaseNode]]. * * @param serial * the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to. * @param index * the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to. 
*/ case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] { import scala.math.Ordered.orderingToOrdered def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that)) } /** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]] * connects. * * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] , * [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]]. * * @param source * the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within * that [[BaseNode]]. * @param sink * sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that * [[BaseNode]]. * @param flipped * flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to * `danglesIn`. * @param dataOpt * actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module */ case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) { def data = dataOpt.get } /** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often * derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually * implement the protocol. */ case class Edges[EI, EO](in: Seq[EI], out: Seq[EO]) /** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */ case object MonitorsEnabled extends Field[Boolean](true) /** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented. * * For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but * [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink * nodes, flipping the rendering of one node's edge will usual produce a more concise visual layout for the * [[LazyModule]]. */ case object RenderFlipped extends Field[Boolean](false) /** The sealed node class in the package, all node are derived from it. * * @param inner * Sink interface implementation. * @param outer * Source interface implementation. * @param valName * val name of this node. * @tparam DI * Downward-flowing parameters received on the inner side of the node. It is usually a brunch of parameters * describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected * [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port * parameters. * @tparam UI * Upward-flowing parameters generated by the inner side of the node. It is usually a brunch of parameters describing * the protocol parameters of a sink. For an [[InwardNode]], it is determined itself. * @tparam EI * Edge Parameters describing a connection on the inner side of the node. It is usually a brunch of transfers * specified for a sink according to protocol. * @tparam BI * Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface. * It should extends from [[chisel3.Data]], which represents the real hardware. * @tparam DO * Downward-flowing parameters generated on the outer side of the node. 
It is usually a brunch of parameters * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined itself. * @tparam UO * Upward-flowing parameters received by the outer side of the node. It is usually a brunch of parameters describing * the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]]. * Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters. * @tparam EO * Edge Parameters describing a connection on the outer side of the node. It is usually a brunch of transfers * specified for a source according to protocol. * @tparam BO * Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source * interface. It should extends from [[chisel3.Data]], which represents the real hardware. * * @note * Call Graph of [[MixedNode]] * - line `─`: source is process by a function and generate pass to others * - Arrow `→`: target of arrow is generated by source * * {{{ * (from the other node) * ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐ * ↓ │ * (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │ * [[InwardNode.accPI]] │ │ │ * │ │ (based on protocol) │ * │ │ [[MixedNode.inner.edgeI]] │ * │ │ ↓ │ * ↓ │ │ │ * (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │ * [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │ * │ │ ↑ │ │ │ * │ │ │ [[OutwardNode.doParams]] │ │ * │ │ │ (from the other node) │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * │ │ │ └────────┬──────────────┤ │ * │ │ │ │ │ │ * │ │ │ │ (based on protocol) │ * │ │ │ │ [[MixedNode.inner.edgeI]] │ * │ │ │ │ │ │ * │ │ (from the other node) │ ↓ │ * │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │ * │ ↑ ↑ │ │ ↓ │ * │ │ │ │ │ [[MixedNode.in]] │ * │ │ │ │ ↓ ↑ │ * │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │ * ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │ * │ │ │ [[MixedNode.bundleOut]]─┐ │ │ * │ │ │ ↑ ↓ │ │ * │ │ │ │ [[MixedNode.out]] │ │ * │ ↓ ↓ │ ↑ │ │ * │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │ * │ │ (from the other node) ↑ │ │ * │ │ │ │ │ │ * │ │ │ [[MixedNode.outer.edgeO]] │ │ * │ │ │ (based on protocol) │ │ * │ │ │ │ │ │ * │ │ │ ┌────────────────────────────────────────┤ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * (immobilize after elaboration)│ ↓ │ │ │ │ * [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │ * ↑ (inward port from [[OutwardNode]]) │ │ │ │ * │ ┌─────────────────────────────────────────┤ │ │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * [[OutwardNode.accPO]] │ ↓ │ │ │ * (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │ * │ ↑ │ │ * │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │ * └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘ * }}} */ abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data]( val inner: InwardNodeImp[DI, UI, EI, BI], val outer: OutwardNodeImp[DO, UO, EO, BO] )( implicit valName: ValName) extends BaseNode with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO] with 
InwardNode[DI, UI, BI] with OutwardNode[DO, UO, BO] { // Generate a [[NodeHandle]] with inward and outward node are both this node. val inward = this val outward = this /** Debug info of nodes binding. */ def bindingInfo: String = s"""$iBindingInfo |$oBindingInfo |""".stripMargin /** Debug info of ports connecting. */ def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}] |${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}] |""".stripMargin /** Debug info of parameters propagations. */ def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}] |${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}] |${diParams.size} downstream inward parameters: [${diParams.mkString(",")}] |${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}] |""".stripMargin /** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and * [[MixedNode.iPortMapping]]. * * Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward * stars and outward stars. * * This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type * of node. * * @param iKnown * Number of known-size ([[BIND_ONCE]]) input bindings. * @param oKnown * Number of known-size ([[BIND_ONCE]]) output bindings. * @param iStar * Number of unknown size ([[BIND_STAR]]) input bindings. * @param oStar * Number of unknown size ([[BIND_STAR]]) output bindings. * @return * A Tuple of the resolved number of input and output connections. */ protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) /** Function to generate downward-flowing outward params from the downward-flowing input params and the current output * ports. * * @param n * The size of the output sequence to generate. * @param p * Sequence of downward-flowing input parameters of this node. * @return * A `n`-sized sequence of downward-flowing output edge parameters. */ protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]]. * * @param n * Size of the output sequence. * @param p * Upward-flowing output edge parameters. * @return * A n-sized sequence of upward-flowing input edge parameters. */ protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] /** @return * The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with * [[BIND_STAR]]. */ protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR) /** @return * The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of * output bindings bound with [[BIND_STAR]]. */ protected[diplomacy] lazy val sourceCard: Int = iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR) /** @return list of nodes involved in flex bindings with this node. */ protected[diplomacy] lazy val flexes: Seq[BaseNode] = oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2) /** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin * greedily taking up the remaining connections. 
* * @return * A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return * value is not relevant. */ protected[diplomacy] lazy val flexOffset: Int = { /** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex * operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a * connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of * each node in the current set and decide whether they should be added to the set or not. * * @return * the mapping of [[BaseNode]] indexed by their serial numbers. */ def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = { if (visited.contains(v.serial) || !v.flexibleArityDirection) { visited } else { v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum)) } } /** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node. * * @example * {{{ * a :*=* b :*=* c * d :*=* b * e :*=* f * }}} * * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)` */ val flexSet = DFS(this, Map()).values /** The total number of :*= operators where we're on the left. */ val allSink = flexSet.map(_.sinkCard).sum /** The total number of :=* operators used when we're on the right. */ val allSource = flexSet.map(_.sourceCard).sum require( allSink == 0 || allSource == 0, s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction." ) allSink - allSource } /** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */ protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = { if (flexibleArityDirection) flexOffset else if (n.flexibleArityDirection) n.flexOffset else 0 } /** For a node which is connected between two nodes, select the one that will influence the direction of the flex * resolution. */ protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = { val dir = edgeArityDirection(n) if (dir < 0) l else if (dir > 0) r else 1 } /** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */ private var starCycleGuard = false /** Resolve all the star operators into concrete indicies. As connections are being made, some may be "star" * connections which need to be resolved. In some way to determine how many actual edges they correspond to. We also * need to build up the ranges of edges which correspond to each binding operator, so that We can apply the correct * edge parameters and later build up correct bundle connections. * * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding * operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort * (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= * bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N` */ protected[diplomacy] lazy val ( oPortMapping: Seq[(Int, Int)], iPortMapping: Seq[(Int, Int)], oStar: Int, iStar: Int ) = { try { if (starCycleGuard) throw StarCycleException() starCycleGuard = true // For a given node N... 
// Number of foo :=* N // + Number of bar :=* foo :*=* N val oStars = oBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0) } // Number of N :*= foo // + Number of N :*=* foo :*= bar val iStars = iBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0) } // 1 for foo := N // + bar.iStar for bar :*= foo :*=* N // + foo.iStar for foo :*= N // + 0 for foo :=* N val oKnown = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, 0, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => 0 } }.sum // 1 for N := foo // + bar.oStar for N :*=* foo :=* bar // + foo.oStar for N :=* foo // + 0 for N :*= foo val iKnown = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, 0) case BIND_QUERY => n.oStar case BIND_STAR => 0 } }.sum // Resolve star depends on the node subclass to implement the algorithm for this. val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars) // Cumulative list of resolved outward binding range starting points val oSum = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => oStar } }.scanLeft(0)(_ + _) // Cumulative list of resolved inward binding range starting points val iSum = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar) case BIND_QUERY => n.oStar case BIND_STAR => iStar } }.scanLeft(0)(_ + _) // Create ranges for each binding based on the running sums and return // those along with resolved values for the star operations. (oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar) } catch { case c: StarCycleException => throw c.copy(loop = context +: c.loop) } } /** Sequence of inward ports. * * This should be called after all star bindings are resolved. * * Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. * `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this * connection was made in the source code. */ protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oBindings.flatMap { case (i, n, _, p, s) => // for each binding operator in this node, look at what it connects to val (start, end) = n.iPortMapping(i) (start until end).map { j => (j, n, p, s) } } /** Sequence of outward ports. * * This should be called after all star bindings are resolved. * * `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of * outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection * was made in the source code. */ protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iBindings.flatMap { case (i, n, _, p, s) => // query this port index range of this node in the other side of node. 
val (start, end) = n.oPortMapping(i) (start until end).map { j => (j, n, p, s) } } // Ephemeral nodes ( which have non-None iForward/oForward) have in_degree = out_degree // Thus, there must exist an Eulerian path and the below algorithms terminate @scala.annotation.tailrec private def oTrace( tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) ): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.iForward(i) match { case None => (i, n, p, s) case Some((j, m)) => oTrace((j, m, p, s)) } } @scala.annotation.tailrec private def iTrace( tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) ): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.oForward(i) match { case None => (i, n, p, s) case Some((j, m)) => iTrace((j, m, p, s)) } } /** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - Numeric index of this binding in the [[InwardNode]] on the other end. * - [[InwardNode]] on the other end of this binding. * - A view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace) /** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - numeric index of this binding in [[OutwardNode]] on the other end. * - [[OutwardNode]] on the other end of this binding. * - a view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace) private var oParamsCycleGuard = false protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) } protected[diplomacy] lazy val doParams: Seq[DO] = { try { if (oParamsCycleGuard) throw DownwardCycleException() oParamsCycleGuard = true val o = mapParamsD(oPorts.size, diParams) require( o.size == oPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of outward ports should equal the number of produced outward parameters. |$context |$connectedPortsInfo |Downstreamed inward parameters: [${diParams.mkString(",")}] |Produced outward parameters: [${o.mkString(",")}] |""".stripMargin ) o.map(outer.mixO(_, this)) } catch { case c: DownwardCycleException => throw c.copy(loop = context +: c.loop) } } private var iParamsCycleGuard = false protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) } protected[diplomacy] lazy val uiParams: Seq[UI] = { try { if (iParamsCycleGuard) throw UpwardCycleException() iParamsCycleGuard = true val i = mapParamsU(iPorts.size, uoParams) require( i.size == iPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of inward ports should equal the number of produced inward parameters. |$context |$connectedPortsInfo |Upstreamed outward parameters: [${uoParams.mkString(",")}] |Produced inward parameters: [${i.mkString(",")}] |""".stripMargin ) i.map(inner.mixI(_, this)) } catch { case c: UpwardCycleException => throw c.copy(loop = context +: c.loop) } } /** Outward edge parameters. 
*/ protected[diplomacy] lazy val edgesOut: Seq[EO] = (oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) } /** Inward edge parameters. */ protected[diplomacy] lazy val edgesIn: Seq[EI] = (iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) } /** A tuple of the input edge parameters and output edge parameters for the edges bound to this node. * * If you need to access to the edges of a foreign Node, use this method (in/out create bundles). */ lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut) /** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */ protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e => val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } /** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */ protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e => val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(serial, i), sink = HalfEdge(n.serial, j), flipped = false, name = wirePrefix + "out", dataOpt = None ) } private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(n.serial, j), sink = HalfEdge(serial, i), flipped = true, name = wirePrefix + "in", dataOpt = None ) } /** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */ protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleOut(i))) } /** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */ protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleIn(i))) } private[diplomacy] var instantiated = false /** Gather Bundle and edge parameters of outward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def out: Seq[(BO, EO)] = { require( instantiated, s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleOut.zip(edgesOut) } /** Gather Bundle and edge parameters of inward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def in: Seq[(BI, EI)] = { require( instantiated, s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleIn.zip(edgesIn) } /** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires, * instantiate monitors on all input ports if appropriate, and return all the dangles of this node. 
*/ protected[diplomacy] def instantiate(): Seq[Dangle] = { instantiated = true if (!circuitIdentity) { (iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) } } danglesOut ++ danglesIn } protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn /** Connects the outward part of a node with the inward part of this node. */ protected[diplomacy] def bind( h: OutwardNode[DI, UI, BI], binding: NodeBinding )( implicit p: Parameters, sourceInfo: SourceInfo ): Unit = { val x = this // x := y val y = h sourceLine(sourceInfo, " at ", "") val i = x.iPushed val o = y.oPushed y.oPush( i, x, binding match { case BIND_ONCE => BIND_ONCE case BIND_FLEX => BIND_FLEX case BIND_STAR => BIND_QUERY case BIND_QUERY => BIND_STAR } ) x.iPush(o, y, binding) } /* Metadata for printing the node graph. */ def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) => val re = inner.render(e) (n, re.copy(flipped = re.flipped != p(RenderFlipped))) } /** Metadata for printing the node graph */ def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) } }
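A minimal pure-Scala sketch (not part of the diplomacy code above) of the cumulative-sum bookkeeping used there: oSum/iSum are built with scanLeft and then zipped init-against-tail, which turns per-binding port counts into the (start, end) ranges that oPortMapping/iPortMapping hand back. Assuming three bindings that resolved to 1, 2 and 3 ports respectively:

val counts = Seq(1, 2, 3)              // resolved port count per binding (assumed)
val sums   = counts.scanLeft(0)(_ + _) // Seq(0, 1, 3, 6)
val ranges = sums.init.zip(sums.tail)  // Seq((0,1), (1,3), (3,6))
assert(ranges == Seq((0, 1), (1, 3), (3, 6)))
// Binding i then owns port indices [start, end) of this node's port list,
// which is exactly the range that oDirectPorts/iDirectPorts iterate over.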
module IntSyncAsyncCrossingSink_n1x1_2( // @[Crossing.scala:74:9] input clock, // @[Crossing.scala:74:9] input reset, // @[Crossing.scala:74:9] input auto_in_sync_0, // @[LazyModuleImp.scala:107:25] output auto_out_0 // @[LazyModuleImp.scala:107:25] ); wire auto_in_sync_0_0 = auto_in_sync_0; // @[Crossing.scala:74:9] wire nodeIn_sync_0 = auto_in_sync_0_0; // @[Crossing.scala:74:9] wire nodeOut_0; // @[MixedNode.scala:542:17] wire auto_out_0_0; // @[Crossing.scala:74:9] assign auto_out_0_0 = nodeOut_0; // @[Crossing.scala:74:9] SynchronizerShiftReg_w1_d3_2 chain ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (reset), .io_d (nodeIn_sync_0), // @[MixedNode.scala:551:17] .io_q (nodeOut_0) ); // @[ShiftReg.scala:45:23] assign auto_out_0 = auto_out_0_0; // @[Crossing.scala:74:9] endmodule
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File AsyncQueue.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ case class AsyncQueueParams( depth: Int = 8, sync: Int = 3, safe: Boolean = true, // If safe is true, then effort is made to resynchronize the crossing indices when either side is reset. // This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty. narrow: Boolean = false) // If narrow is true then the read mux is moved to the source side of the crossing. // This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing, // at the expense of a combinational path from the sink to the source and back to the sink. { require (depth > 0 && isPow2(depth)) require (sync >= 2) val bits = log2Ceil(depth) val wires = if (narrow) 1 else depth } object AsyncQueueParams { // When there is only one entry, we don't need narrow. 
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false) } class AsyncBundleSafety extends Bundle { val ridx_valid = Input (Bool()) val widx_valid = Output(Bool()) val source_reset_n = Output(Bool()) val sink_reset_n = Input (Bool()) } class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle { // Data-path synchronization val mem = Output(Vec(params.wires, gen)) val ridx = Input (UInt((params.bits+1).W)) val widx = Output(UInt((params.bits+1).W)) val index = params.narrow.option(Input(UInt(params.bits.W))) // Signals used to self-stabilize a safe AsyncQueue val safe = params.safe.option(new AsyncBundleSafety) } object GrayCounter { def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = { val incremented = Wire(UInt(bits.W)) val binary = RegNext(next=incremented, init=0.U).suggestName(name) incremented := Mux(clear, 0.U, binary + increment.asUInt) incremented ^ (incremented >> 1) } } class AsyncValidSync(sync: Int, desc: String) extends RawModule { val io = IO(new Bundle { val in = Input(Bool()) val out = Output(Bool()) }) val clock = IO(Input(Clock())) val reset = IO(Input(AsyncReset())) withClockAndReset(clock, reset){ io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc)) } } class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSource_${gen.typeName}" val io = IO(new Bundle { // These come from the source domain val enq = Flipped(Decoupled(gen)) // These cross to the sink clock domain val async = new AsyncBundle(gen, params) }) val bits = params.bits val sink_ready = WireInit(true.B) val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all. 
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin")) val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray")) val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U) val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1)) when (io.enq.fire) { mem(index) := io.enq.bits } val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg")) io.enq.ready := ready_reg && sink_ready val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray")) io.async.widx := widx_reg io.async.index match { case Some(index) => io.async.mem(0) := mem(index) case None => io.async.mem := mem } io.async.safe.foreach { sio => val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0")) val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1")) val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend")) val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid")) source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_valid .reset := reset.asAsyncReset source_valid_0.clock := clock source_valid_1.clock := clock sink_extend .clock := clock sink_valid .clock := clock source_valid_0.io.in := true.B source_valid_1.io.in := source_valid_0.io.out sio.widx_valid := source_valid_1.io.out sink_extend.io.in := sio.ridx_valid sink_valid.io.in := sink_extend.io.out sink_ready := sink_valid.io.out sio.source_reset_n := !reset.asBool // Assert that if there is stuff in the queue, then reset cannot happen // Impossible to write because dequeue can occur on the receiving side, // then reset allowed to happen, but write side cannot know that dequeue // occurred. 
// TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected") // assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty") } } class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSink_${gen.typeName}" val io = IO(new Bundle { // These come from the sink domain val deq = Decoupled(gen) // These cross to the source clock domain val async = Flipped(new AsyncBundle(gen, params)) }) val bits = params.bits val source_ready = WireInit(true.B) val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin")) val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray")) val valid = source_ready && ridx =/= widx // The mux is safe because timing analysis ensures ridx has reached the register // On an ASIC, changes to the unread location cannot affect the selected value // On an FPGA, only one input changes at a time => mem updates don't cause glitches // The register only latches when the selected valued is not being written val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1)) io.async.index.foreach { _ := index } // This register does not NEED to be reset, as its contents will not // be considered unless the asynchronously reset deq valid register is set. // It is possible that bits latches when the source domain is reset / has power cut // This is safe, because isolation gates brought mem low before the zeroed widx reached us val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index) io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg")) val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg")) io.deq.valid := valid_reg && source_ready val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray")) io.async.ridx := ridx_reg io.async.safe.foreach { sio => val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0")) val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1")) val source_extend = Module(new AsyncValidSync(params.sync, "source_extend")) val source_valid = Module(new AsyncValidSync(params.sync, "source_valid")) sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_valid .reset := reset.asAsyncReset sink_valid_0 .clock := clock sink_valid_1 .clock := clock source_extend.clock := clock source_valid .clock := clock sink_valid_0.io.in := true.B sink_valid_1.io.in := sink_valid_0.io.out sio.ridx_valid := sink_valid_1.io.out source_extend.io.in := sio.widx_valid source_valid.io.in := source_extend.io.out source_ready := source_valid.io.out sio.sink_reset_n := !reset.asBool // TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // // val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool // val reset_and_extend_prev = RegNext(reset_and_extend, true.B) // val reset_rise = !reset_and_extend_prev && reset_and_extend // val prev_idx_match = 
AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0) // assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty") } } object FromAsyncBundle { // Sometimes it makes sense for the sink to have different sync than the source def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync) def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = { val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync))) sink.io.async <> x sink.io.deq } } object ToAsyncBundle { def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = { val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params)) source.io.enq <> x source.io.async } } class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] { val io = IO(new CrossingIO(gen)) val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) } val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) } source.io.enq <> io.enq io.deq <> sink.io.deq sink.io.async <> source.io.async }
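The widx/ridx pointers above are kept in Gray code so that only one bit changes per increment, which is what makes synchronizing them bit-by-bit through AsyncResetSynchronizerShiftReg safe. A small pure-Scala sketch (not Chisel) of the bin ^ (bin >> 1) conversion performed by GrayCounter:

def toGray(bin: Int): Int = bin ^ (bin >>> 1)
// a 3-bit counter walks 0,1,3,2,6,7,5,4 in Gray code
assert((0 until 8).map(toGray) == Vector(0, 1, 3, 2, 6, 7, 5, 4))
// adjacent codes differ in exactly one bit, so a mis-sampled pointer is off by at most one step
assert((0 until 7).forall(i => Integer.bitCount(toGray(i) ^ toGray(i + 1)) == 1))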
module AsyncValidSync_130( // @[AsyncQueue.scala:58:7] input io_in, // @[AsyncQueue.scala:59:14] output io_out, // @[AsyncQueue.scala:59:14] input clock, // @[AsyncQueue.scala:63:17] input reset // @[AsyncQueue.scala:64:17] ); wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7] wire _io_out_WIRE; // @[ShiftReg.scala:48:24] wire io_out_0; // @[AsyncQueue.scala:58:7] assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24] AsyncResetSynchronizerShiftReg_w1_d3_i0_147 io_out_sink_extend ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (reset), .io_d (io_in_0), // @[AsyncQueue.scala:58:7] .io_q (_io_out_WIRE) ); // @[ShiftReg.scala:45:23] assign io_out = io_out_0; // @[AsyncQueue.scala:58:7] endmodule
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
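A hedged usage sketch (module and signal names are made up, not taken from the files above) showing two of this file's companion-object helpers side by side: ClockCrossingReg for a single-deep capture register and ResetSynchronizerShiftReg for a multi-flop synchronizer whose reset type is left to chisel3 inference:

import chisel3._
import freechips.rocketchip.util.{ClockCrossingReg, ResetSynchronizerShiftReg}

class CdcSketch extends Module {
  val io = IO(new Bundle {
    val d      = Input(UInt(8.W))
    val en     = Input(Bool())
    val q      = Output(UInt(8.W))
    val status = Input(UInt(2.W))
    val synced = Output(UInt(2.W))
  })
  // single-deep clock-domain-crossing capture register, zero-initialized
  io.q := ClockCrossingReg(io.d, en = io.en, doInit = true, name = Some("cdc_data"))
  // 3-deep synchronizer chain on a slowly-changing status word
  io.synced := ResetSynchronizerShiftReg(io.status, 3, Some("status_sync"))
}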
module AsyncResetSynchronizerShiftReg_w1_d3_i0_161( // @[SynchronizerReg.scala:80:7] input clock, // @[SynchronizerReg.scala:80:7] input reset, // @[SynchronizerReg.scala:80:7] output io_q // @[ShiftReg.scala:36:14] ); wire _output_T = reset; // @[SynchronizerReg.scala:86:21] wire io_d = 1'h1; // @[SynchronizerReg.scala:80:7, :87:41] wire _output_T_1 = 1'h1; // @[SynchronizerReg.scala:80:7, :87:41] wire output_0; // @[ShiftReg.scala:48:24] wire io_q_0; // @[SynchronizerReg.scala:80:7] assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7] AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_281 output_chain ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (_output_T), // @[SynchronizerReg.scala:86:21] .io_q (output_0) ); // @[ShiftReg.scala:45:23] assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7] endmodule
Generate the Verilog code corresponding to the following Chisel files. File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File Nodes.scala: package constellation.channel import chisel3._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.{Parameters, Field} import freechips.rocketchip.diplomacy._ case class EmptyParams() case class ChannelEdgeParams(cp: ChannelParams, p: Parameters) object ChannelImp extends SimpleNodeImp[EmptyParams, ChannelParams, ChannelEdgeParams, Channel] { def edge(pd: EmptyParams, pu: ChannelParams, p: Parameters, sourceInfo: SourceInfo) = { ChannelEdgeParams(pu, p) } def bundle(e: ChannelEdgeParams) = new Channel(e.cp)(e.p) def render(e: ChannelEdgeParams) = if (e.cp.possibleFlows.size == 0) { RenderedEdge(colour = "ffffff", label = "X") } else { RenderedEdge(colour = "#0000ff", label = e.cp.payloadBits.toString) } override def monitor(bundle: Channel, edge: ChannelEdgeParams): Unit = { val monitor = Module(new NoCMonitor(edge.cp)(edge.p)) monitor.io.in := bundle } // TODO: Add nodepath stuff? 
override def mixO, override def mixI } case class ChannelSourceNode(val destId: Int)(implicit valName: ValName) extends SourceNode(ChannelImp)(Seq(EmptyParams())) case class ChannelDestNode(val destParams: ChannelParams)(implicit valName: ValName) extends SinkNode(ChannelImp)(Seq(destParams)) case class ChannelAdapterNode( slaveFn: ChannelParams => ChannelParams = { d => d })( implicit valName: ValName) extends AdapterNode(ChannelImp)((e: EmptyParams) => e, slaveFn) case class ChannelIdentityNode()(implicit valName: ValName) extends IdentityNode(ChannelImp)() case class ChannelEphemeralNode()(implicit valName: ValName) extends EphemeralNode(ChannelImp)() case class IngressChannelEdgeParams(cp: IngressChannelParams, p: Parameters) case class EgressChannelEdgeParams(cp: EgressChannelParams, p: Parameters) object IngressChannelImp extends SimpleNodeImp[EmptyParams, IngressChannelParams, IngressChannelEdgeParams, IngressChannel] { def edge(pd: EmptyParams, pu: IngressChannelParams, p: Parameters, sourceInfo: SourceInfo) = { IngressChannelEdgeParams(pu, p) } def bundle(e: IngressChannelEdgeParams) = new IngressChannel(e.cp)(e.p) def render(e: IngressChannelEdgeParams) = if (e.cp.possibleFlows.size == 0) { RenderedEdge(colour = "ffffff", label = "X") } else { RenderedEdge(colour = "#00ff00", label = e.cp.payloadBits.toString) } } object EgressChannelImp extends SimpleNodeImp[EmptyParams, EgressChannelParams, EgressChannelEdgeParams, EgressChannel] { def edge(pd: EmptyParams, pu: EgressChannelParams, p: Parameters, sourceInfo: SourceInfo) = { EgressChannelEdgeParams(pu, p) } def bundle(e: EgressChannelEdgeParams) = new EgressChannel(e.cp)(e.p) def render(e: EgressChannelEdgeParams) = if (e.cp.possibleFlows.size == 0) { RenderedEdge(colour = "ffffff", label = "X") } else { RenderedEdge(colour = "#ff0000", label = e.cp.payloadBits.toString) } } case class IngressChannelSourceNode(val destId: Int)(implicit valName: ValName) extends SourceNode(IngressChannelImp)(Seq(EmptyParams())) case class IngressChannelDestNode(val destParams: IngressChannelParams)(implicit valName: ValName) extends SinkNode(IngressChannelImp)(Seq(destParams)) case class EgressChannelSourceNode(val egressId: Int)(implicit valName: ValName) extends SourceNode(EgressChannelImp)(Seq(EmptyParams())) case class EgressChannelDestNode(val destParams: EgressChannelParams)(implicit valName: ValName) extends SinkNode(EgressChannelImp)(Seq(destParams)) case class IngressChannelAdapterNode( slaveFn: IngressChannelParams => IngressChannelParams = { d => d })( implicit valName: ValName) extends AdapterNode(IngressChannelImp)(m => m, slaveFn) case class EgressChannelAdapterNode( slaveFn: EgressChannelParams => EgressChannelParams = { d => d })( implicit valName: ValName) extends AdapterNode(EgressChannelImp)(m => m, slaveFn) case class IngressChannelIdentityNode()(implicit valName: ValName) extends IdentityNode(IngressChannelImp)() case class EgressChannelIdentityNode()(implicit valName: ValName) extends IdentityNode(EgressChannelImp)() case class IngressChannelEphemeralNode()(implicit valName: ValName) extends EphemeralNode(IngressChannelImp)() case class EgressChannelEphemeralNode()(implicit valName: ValName) extends EphemeralNode(EgressChannelImp)() File Router.scala: package constellation.router import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.{Field, Parameters} import freechips.rocketchip.diplomacy._ import freechips.rocketchip.util._ import constellation.channel._ import constellation.routing.{RoutingRelation} 
import constellation.noc.{HasNoCParams} case class UserRouterParams( // Payload width. Must match payload width on all channels attached to this routing node payloadBits: Int = 64, // Combines SA and ST stages (removes pipeline register) combineSAST: Boolean = false, // Combines RC and VA stages (removes pipeline register) combineRCVA: Boolean = false, // Adds combinational path from SA to VA coupleSAVA: Boolean = false, vcAllocator: VCAllocatorParams => Parameters => VCAllocator = (vP) => (p) => new RotatingSingleVCAllocator(vP)(p) ) case class RouterParams( nodeId: Int, nIngress: Int, nEgress: Int, user: UserRouterParams ) trait HasRouterOutputParams { def outParams: Seq[ChannelParams] def egressParams: Seq[EgressChannelParams] def allOutParams = outParams ++ egressParams def nOutputs = outParams.size def nEgress = egressParams.size def nAllOutputs = allOutParams.size } trait HasRouterInputParams { def inParams: Seq[ChannelParams] def ingressParams: Seq[IngressChannelParams] def allInParams = inParams ++ ingressParams def nInputs = inParams.size def nIngress = ingressParams.size def nAllInputs = allInParams.size } trait HasRouterParams { def routerParams: RouterParams def nodeId = routerParams.nodeId def payloadBits = routerParams.user.payloadBits } class DebugBundle(val nIn: Int) extends Bundle { val va_stall = Vec(nIn, UInt()) val sa_stall = Vec(nIn, UInt()) } class Router( val routerParams: RouterParams, preDiplomaticInParams: Seq[ChannelParams], preDiplomaticIngressParams: Seq[IngressChannelParams], outDests: Seq[Int], egressIds: Seq[Int] )(implicit p: Parameters) extends LazyModule with HasNoCParams with HasRouterParams { val allPreDiplomaticInParams = preDiplomaticInParams ++ preDiplomaticIngressParams val destNodes = preDiplomaticInParams.map(u => ChannelDestNode(u)) val sourceNodes = outDests.map(u => ChannelSourceNode(u)) val ingressNodes = preDiplomaticIngressParams.map(u => IngressChannelDestNode(u)) val egressNodes = egressIds.map(u => EgressChannelSourceNode(u)) val debugNode = BundleBridgeSource(() => new DebugBundle(allPreDiplomaticInParams.size)) val ctrlNode = if (hasCtrl) Some(BundleBridgeSource(() => new RouterCtrlBundle)) else None def inParams = module.inParams def outParams = module.outParams def ingressParams = module.ingressParams def egressParams = module.egressParams lazy val module = new LazyModuleImp(this) with HasRouterInputParams with HasRouterOutputParams { val (io_in, edgesIn) = destNodes.map(_.in(0)).unzip val (io_out, edgesOut) = sourceNodes.map(_.out(0)).unzip val (io_ingress, edgesIngress) = ingressNodes.map(_.in(0)).unzip val (io_egress, edgesEgress) = egressNodes.map(_.out(0)).unzip val io_debug = debugNode.out(0)._1 val inParams = edgesIn.map(_.cp) val outParams = edgesOut.map(_.cp) val ingressParams = edgesIngress.map(_.cp) val egressParams = edgesEgress.map(_.cp) allOutParams.foreach(u => require(u.srcId == nodeId && u.payloadBits == routerParams.user.payloadBits)) allInParams.foreach(u => require(u.destId == nodeId && u.payloadBits == routerParams.user.payloadBits)) require(nIngress == routerParams.nIngress) require(nEgress == routerParams.nEgress) require(nAllInputs >= 1) require(nAllOutputs >= 1) require(nodeId < (1 << nodeIdBits)) val input_units = inParams.zipWithIndex.map { case (u,i) => Module(new InputUnit(u, outParams, egressParams, routerParams.user.combineRCVA, routerParams.user.combineSAST)) .suggestName(s"input_unit_${i}_from_${u.srcId}") } val ingress_units = ingressParams.zipWithIndex.map { case (u,i) => Module(new IngressUnit(i, u, 
outParams, egressParams, routerParams.user.combineRCVA, routerParams.user.combineSAST)) .suggestName(s"ingress_unit_${i+nInputs}_from_${u.ingressId}") } val all_input_units = input_units ++ ingress_units val output_units = outParams.zipWithIndex.map { case (u,i) => Module(new OutputUnit(inParams, ingressParams, u)) .suggestName(s"output_unit_${i}_to_${u.destId}")} val egress_units = egressParams.zipWithIndex.map { case (u,i) => Module(new EgressUnit(routerParams.user.coupleSAVA && all_input_units.size == 1, routerParams.user.combineSAST, inParams, ingressParams, u)) .suggestName(s"egress_unit_${i+nOutputs}_to_${u.egressId}")} val all_output_units = output_units ++ egress_units val switch = Module(new Switch(routerParams, inParams, outParams, ingressParams, egressParams)) val switch_allocator = Module(new SwitchAllocator(routerParams, inParams, outParams, ingressParams, egressParams)) val vc_allocator = Module(routerParams.user.vcAllocator( VCAllocatorParams(routerParams, inParams, outParams, ingressParams, egressParams) )(p)) val route_computer = Module(new RouteComputer(routerParams, inParams, outParams, ingressParams, egressParams)) val fires_count = WireInit(PopCount(vc_allocator.io.req.map(_.fire))) dontTouch(fires_count) (io_in zip input_units ).foreach { case (i,u) => u.io.in <> i } (io_ingress zip ingress_units).foreach { case (i,u) => u.io.in <> i.flit } (output_units zip io_out ).foreach { case (u,o) => o <> u.io.out } (egress_units zip io_egress).foreach { case (u,o) => o.flit <> u.io.out } (route_computer.io.req zip all_input_units).foreach { case (i,u) => i <> u.io.router_req } (all_input_units zip route_computer.io.resp).foreach { case (u,o) => u.io.router_resp <> o } (vc_allocator.io.req zip all_input_units).foreach { case (i,u) => i <> u.io.vcalloc_req } (all_input_units zip vc_allocator.io.resp).foreach { case (u,o) => u.io.vcalloc_resp <> o } (all_output_units zip vc_allocator.io.out_allocs).foreach { case (u,a) => u.io.allocs <> a } (vc_allocator.io.channel_status zip all_output_units).foreach { case (a,u) => a := u.io.channel_status } all_input_units.foreach(in => all_output_units.zipWithIndex.foreach { case (out,outIdx) => in.io.out_credit_available(outIdx) := out.io.credit_available }) (all_input_units zip switch_allocator.io.req).foreach { case (u,r) => r <> u.io.salloc_req } (all_output_units zip switch_allocator.io.credit_alloc).foreach { case (u,a) => u.io.credit_alloc := a } (switch.io.in zip all_input_units).foreach { case (i,u) => i <> u.io.out } (all_output_units zip switch.io.out).foreach { case (u,o) => u.io.in <> o } switch.io.sel := (if (routerParams.user.combineSAST) { switch_allocator.io.switch_sel } else { RegNext(switch_allocator.io.switch_sel) }) if (hasCtrl) { val io_ctrl = ctrlNode.get.out(0)._1 val ctrl = Module(new RouterControlUnit(routerParams, inParams, outParams, ingressParams, egressParams)) io_ctrl <> ctrl.io.ctrl (all_input_units zip ctrl.io.in_block ).foreach { case (l,r) => l.io.block := r } (all_input_units zip ctrl.io.in_fire ).foreach { case (l,r) => r := l.io.out.map(_.valid) } } else { input_units.foreach(_.io.block := false.B) ingress_units.foreach(_.io.block := false.B) } (io_debug.va_stall zip all_input_units.map(_.io.debug.va_stall)).map { case (l,r) => l := r } (io_debug.sa_stall zip all_input_units.map(_.io.debug.sa_stall)).map { case (l,r) => l := r } val debug_tsc = RegInit(0.U(64.W)) debug_tsc := debug_tsc + 1.U val debug_sample = RegInit(0.U(64.W)) debug_sample := debug_sample + 1.U val sample_rate = 
PlusArg("noc_util_sample_rate", width=20) when (debug_sample === sample_rate - 1.U) { debug_sample := 0.U } def sample(fire: Bool, s: String) = { val util_ctr = RegInit(0.U(64.W)) val fired = RegInit(false.B) util_ctr := util_ctr + fire fired := fired || fire when (sample_rate =/= 0.U && debug_sample === sample_rate - 1.U && fired) { val fmtStr = s"nocsample %d $s %d\n" printf(fmtStr, debug_tsc, util_ctr); fired := fire } } destNodes.map(_.in(0)).foreach { case (in, edge) => in.flit.map { f => sample(f.fire, s"${edge.cp.srcId} $nodeId") } } ingressNodes.map(_.in(0)).foreach { case (in, edge) => sample(in.flit.fire, s"i${edge.cp.asInstanceOf[IngressChannelParams].ingressId} $nodeId") } egressNodes.map(_.out(0)).foreach { case (out, edge) => sample(out.flit.fire, s"$nodeId e${edge.cp.asInstanceOf[EgressChannelParams].egressId}") } } } File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. 
val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. 
*/ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } }
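The pairing step in instantiate() above groups every Dangle by its source HalfEdge; a group of exactly two is an internal connection wired up on the spot, and anything left over becomes an AutoBundle port. A simplified pure-Scala sketch of that bookkeeping, using a made-up stand-in for Dangle rather than the real class:

case class FakeDangle(source: String, flipped: Boolean) // hypothetical stand-in

val dangles = Seq(
  FakeDangle("n1#0", flipped = false), FakeDangle("n1#0", flipped = true), // matched pair
  FakeDangle("n2#0", flipped = false)                                      // stays dangling
)
val pairing = dangles.groupBy(_.source)
val done    = pairing.values.filter(_.size == 2).map(_.head.source).toSet
val forward = dangles.filterNot(d => done(d.source))
assert(done == Set("n1#0") && forward.map(_.source) == Seq("n2#0"))
// `forward` is what turns into auto_* IO on the generated module, e.g. the
// auto_dest_nodes_in_* / auto_source_nodes_out_* ports of the Router module below.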
module Router_23( // @[Router.scala:89:25] input clock, // @[Router.scala:89:25] input reset, // @[Router.scala:89:25] output auto_debug_out_va_stall_0, // @[LazyModuleImp.scala:107:25] output auto_debug_out_va_stall_1, // @[LazyModuleImp.scala:107:25] output auto_debug_out_va_stall_2, // @[LazyModuleImp.scala:107:25] output auto_debug_out_sa_stall_0, // @[LazyModuleImp.scala:107:25] output auto_debug_out_sa_stall_1, // @[LazyModuleImp.scala:107:25] output auto_debug_out_sa_stall_2, // @[LazyModuleImp.scala:107:25] input auto_egress_nodes_out_flit_ready, // @[LazyModuleImp.scala:107:25] output auto_egress_nodes_out_flit_valid, // @[LazyModuleImp.scala:107:25] output auto_egress_nodes_out_flit_bits_head, // @[LazyModuleImp.scala:107:25] output auto_egress_nodes_out_flit_bits_tail, // @[LazyModuleImp.scala:107:25] output [36:0] auto_egress_nodes_out_flit_bits_payload, // @[LazyModuleImp.scala:107:25] output auto_ingress_nodes_in_flit_ready, // @[LazyModuleImp.scala:107:25] input auto_ingress_nodes_in_flit_valid, // @[LazyModuleImp.scala:107:25] input auto_ingress_nodes_in_flit_bits_head, // @[LazyModuleImp.scala:107:25] input auto_ingress_nodes_in_flit_bits_tail, // @[LazyModuleImp.scala:107:25] input [36:0] auto_ingress_nodes_in_flit_bits_payload, // @[LazyModuleImp.scala:107:25] input [4:0] auto_ingress_nodes_in_flit_bits_egress_id, // @[LazyModuleImp.scala:107:25] output auto_source_nodes_out_2_flit_0_valid, // @[LazyModuleImp.scala:107:25] output auto_source_nodes_out_2_flit_0_bits_head, // @[LazyModuleImp.scala:107:25] output auto_source_nodes_out_2_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25] output [36:0] auto_source_nodes_out_2_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25] output auto_source_nodes_out_2_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25] output [3:0] auto_source_nodes_out_2_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25] output [1:0] auto_source_nodes_out_2_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25] output [3:0] auto_source_nodes_out_2_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25] output [1:0] auto_source_nodes_out_2_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25] output auto_source_nodes_out_2_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25] input [1:0] auto_source_nodes_out_2_credit_return, // @[LazyModuleImp.scala:107:25] input [1:0] auto_source_nodes_out_2_vc_free, // @[LazyModuleImp.scala:107:25] output auto_source_nodes_out_1_flit_0_valid, // @[LazyModuleImp.scala:107:25] output auto_source_nodes_out_1_flit_0_bits_head, // @[LazyModuleImp.scala:107:25] output auto_source_nodes_out_1_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25] output [36:0] auto_source_nodes_out_1_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25] output auto_source_nodes_out_1_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25] output [3:0] auto_source_nodes_out_1_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25] output [1:0] auto_source_nodes_out_1_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25] output [3:0] auto_source_nodes_out_1_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25] output [1:0] auto_source_nodes_out_1_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25] output auto_source_nodes_out_1_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25] input [1:0] auto_source_nodes_out_1_credit_return, // @[LazyModuleImp.scala:107:25] input [1:0] auto_source_nodes_out_1_vc_free, // 
@[LazyModuleImp.scala:107:25] output auto_source_nodes_out_0_flit_0_valid, // @[LazyModuleImp.scala:107:25] output auto_source_nodes_out_0_flit_0_bits_head, // @[LazyModuleImp.scala:107:25] output auto_source_nodes_out_0_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25] output [36:0] auto_source_nodes_out_0_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25] output auto_source_nodes_out_0_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25] output [3:0] auto_source_nodes_out_0_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25] output [1:0] auto_source_nodes_out_0_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25] output [3:0] auto_source_nodes_out_0_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25] output [1:0] auto_source_nodes_out_0_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25] output auto_source_nodes_out_0_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25] input [1:0] auto_source_nodes_out_0_credit_return, // @[LazyModuleImp.scala:107:25] input [1:0] auto_source_nodes_out_0_vc_free, // @[LazyModuleImp.scala:107:25] input auto_dest_nodes_in_1_flit_0_valid, // @[LazyModuleImp.scala:107:25] input auto_dest_nodes_in_1_flit_0_bits_head, // @[LazyModuleImp.scala:107:25] input auto_dest_nodes_in_1_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25] input [36:0] auto_dest_nodes_in_1_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25] input auto_dest_nodes_in_1_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25] input [3:0] auto_dest_nodes_in_1_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25] input [1:0] auto_dest_nodes_in_1_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25] input [3:0] auto_dest_nodes_in_1_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25] input [1:0] auto_dest_nodes_in_1_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25] input auto_dest_nodes_in_1_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25] output [1:0] auto_dest_nodes_in_1_credit_return, // @[LazyModuleImp.scala:107:25] output [1:0] auto_dest_nodes_in_1_vc_free, // @[LazyModuleImp.scala:107:25] input auto_dest_nodes_in_0_flit_0_valid, // @[LazyModuleImp.scala:107:25] input auto_dest_nodes_in_0_flit_0_bits_head, // @[LazyModuleImp.scala:107:25] input auto_dest_nodes_in_0_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25] input [36:0] auto_dest_nodes_in_0_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25] input auto_dest_nodes_in_0_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25] input [3:0] auto_dest_nodes_in_0_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25] input [1:0] auto_dest_nodes_in_0_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25] input [3:0] auto_dest_nodes_in_0_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25] input [1:0] auto_dest_nodes_in_0_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25] input auto_dest_nodes_in_0_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25] output [1:0] auto_dest_nodes_in_0_credit_return, // @[LazyModuleImp.scala:107:25] output [1:0] auto_dest_nodes_in_0_vc_free // @[LazyModuleImp.scala:107:25] ); wire [19:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire _route_computer_io_resp_1_vc_sel_2_0; // @[Router.scala:136:32] wire _route_computer_io_resp_1_vc_sel_2_1; // @[Router.scala:136:32] wire _route_computer_io_resp_1_vc_sel_1_0; // @[Router.scala:136:32] wire _route_computer_io_resp_1_vc_sel_1_1; // @[Router.scala:136:32] wire 
_route_computer_io_resp_1_vc_sel_0_0; // @[Router.scala:136:32] wire _route_computer_io_resp_1_vc_sel_0_1; // @[Router.scala:136:32] wire _route_computer_io_resp_0_vc_sel_2_0; // @[Router.scala:136:32] wire _route_computer_io_resp_0_vc_sel_2_1; // @[Router.scala:136:32] wire _route_computer_io_resp_0_vc_sel_1_0; // @[Router.scala:136:32] wire _route_computer_io_resp_0_vc_sel_1_1; // @[Router.scala:136:32] wire _route_computer_io_resp_0_vc_sel_0_0; // @[Router.scala:136:32] wire _route_computer_io_resp_0_vc_sel_0_1; // @[Router.scala:136:32] wire _vc_allocator_io_req_2_ready; // @[Router.scala:133:30] wire _vc_allocator_io_req_1_ready; // @[Router.scala:133:30] wire _vc_allocator_io_req_0_ready; // @[Router.scala:133:30] wire _vc_allocator_io_resp_2_vc_sel_3_0; // @[Router.scala:133:30] wire _vc_allocator_io_resp_2_vc_sel_2_0; // @[Router.scala:133:30] wire _vc_allocator_io_resp_2_vc_sel_2_1; // @[Router.scala:133:30] wire _vc_allocator_io_resp_2_vc_sel_1_0; // @[Router.scala:133:30] wire _vc_allocator_io_resp_2_vc_sel_1_1; // @[Router.scala:133:30] wire _vc_allocator_io_resp_2_vc_sel_0_0; // @[Router.scala:133:30] wire _vc_allocator_io_resp_2_vc_sel_0_1; // @[Router.scala:133:30] wire _vc_allocator_io_resp_1_vc_sel_3_0; // @[Router.scala:133:30] wire _vc_allocator_io_resp_1_vc_sel_0_1; // @[Router.scala:133:30] wire _vc_allocator_io_resp_0_vc_sel_3_0; // @[Router.scala:133:30] wire _vc_allocator_io_resp_0_vc_sel_2_1; // @[Router.scala:133:30] wire _vc_allocator_io_resp_0_vc_sel_0_1; // @[Router.scala:133:30] wire _vc_allocator_io_out_allocs_3_0_alloc; // @[Router.scala:133:30] wire _vc_allocator_io_out_allocs_2_1_alloc; // @[Router.scala:133:30] wire _vc_allocator_io_out_allocs_1_0_alloc; // @[Router.scala:133:30] wire _vc_allocator_io_out_allocs_0_1_alloc; // @[Router.scala:133:30] wire _switch_allocator_io_req_2_0_ready; // @[Router.scala:132:34] wire _switch_allocator_io_req_1_0_ready; // @[Router.scala:132:34] wire _switch_allocator_io_req_0_0_ready; // @[Router.scala:132:34] wire _switch_allocator_io_credit_alloc_3_0_alloc; // @[Router.scala:132:34] wire _switch_allocator_io_credit_alloc_3_0_tail; // @[Router.scala:132:34] wire _switch_allocator_io_credit_alloc_2_1_alloc; // @[Router.scala:132:34] wire _switch_allocator_io_credit_alloc_1_0_alloc; // @[Router.scala:132:34] wire _switch_allocator_io_credit_alloc_0_1_alloc; // @[Router.scala:132:34] wire _switch_allocator_io_switch_sel_3_0_2_0; // @[Router.scala:132:34] wire _switch_allocator_io_switch_sel_3_0_1_0; // @[Router.scala:132:34] wire _switch_allocator_io_switch_sel_3_0_0_0; // @[Router.scala:132:34] wire _switch_allocator_io_switch_sel_2_0_2_0; // @[Router.scala:132:34] wire _switch_allocator_io_switch_sel_2_0_1_0; // @[Router.scala:132:34] wire _switch_allocator_io_switch_sel_2_0_0_0; // @[Router.scala:132:34] wire _switch_allocator_io_switch_sel_1_0_2_0; // @[Router.scala:132:34] wire _switch_allocator_io_switch_sel_0_0_2_0; // @[Router.scala:132:34] wire _switch_allocator_io_switch_sel_0_0_1_0; // @[Router.scala:132:34] wire _switch_allocator_io_switch_sel_0_0_0_0; // @[Router.scala:132:34] wire _switch_io_out_3_0_valid; // @[Router.scala:131:24] wire _switch_io_out_3_0_bits_head; // @[Router.scala:131:24] wire _switch_io_out_3_0_bits_tail; // @[Router.scala:131:24] wire [36:0] _switch_io_out_3_0_bits_payload; // @[Router.scala:131:24] wire [3:0] _switch_io_out_3_0_bits_flow_ingress_node; // @[Router.scala:131:24] wire [1:0] _switch_io_out_3_0_bits_flow_ingress_node_id; // @[Router.scala:131:24] wire 
_switch_io_out_2_0_valid; // @[Router.scala:131:24] wire _switch_io_out_2_0_bits_head; // @[Router.scala:131:24] wire _switch_io_out_2_0_bits_tail; // @[Router.scala:131:24] wire [36:0] _switch_io_out_2_0_bits_payload; // @[Router.scala:131:24] wire _switch_io_out_2_0_bits_flow_vnet_id; // @[Router.scala:131:24] wire [3:0] _switch_io_out_2_0_bits_flow_ingress_node; // @[Router.scala:131:24] wire [1:0] _switch_io_out_2_0_bits_flow_ingress_node_id; // @[Router.scala:131:24] wire [3:0] _switch_io_out_2_0_bits_flow_egress_node; // @[Router.scala:131:24] wire [1:0] _switch_io_out_2_0_bits_flow_egress_node_id; // @[Router.scala:131:24] wire _switch_io_out_2_0_bits_virt_channel_id; // @[Router.scala:131:24] wire _switch_io_out_1_0_valid; // @[Router.scala:131:24] wire _switch_io_out_1_0_bits_head; // @[Router.scala:131:24] wire _switch_io_out_1_0_bits_tail; // @[Router.scala:131:24] wire [36:0] _switch_io_out_1_0_bits_payload; // @[Router.scala:131:24] wire _switch_io_out_1_0_bits_flow_vnet_id; // @[Router.scala:131:24] wire [3:0] _switch_io_out_1_0_bits_flow_ingress_node; // @[Router.scala:131:24] wire [1:0] _switch_io_out_1_0_bits_flow_ingress_node_id; // @[Router.scala:131:24] wire [3:0] _switch_io_out_1_0_bits_flow_egress_node; // @[Router.scala:131:24] wire [1:0] _switch_io_out_1_0_bits_flow_egress_node_id; // @[Router.scala:131:24] wire _switch_io_out_1_0_bits_virt_channel_id; // @[Router.scala:131:24] wire _switch_io_out_0_0_valid; // @[Router.scala:131:24] wire _switch_io_out_0_0_bits_head; // @[Router.scala:131:24] wire _switch_io_out_0_0_bits_tail; // @[Router.scala:131:24] wire [36:0] _switch_io_out_0_0_bits_payload; // @[Router.scala:131:24] wire _switch_io_out_0_0_bits_flow_vnet_id; // @[Router.scala:131:24] wire [3:0] _switch_io_out_0_0_bits_flow_ingress_node; // @[Router.scala:131:24] wire [1:0] _switch_io_out_0_0_bits_flow_ingress_node_id; // @[Router.scala:131:24] wire [3:0] _switch_io_out_0_0_bits_flow_egress_node; // @[Router.scala:131:24] wire [1:0] _switch_io_out_0_0_bits_flow_egress_node_id; // @[Router.scala:131:24] wire _switch_io_out_0_0_bits_virt_channel_id; // @[Router.scala:131:24] wire _egress_unit_3_to_6_io_credit_available_0; // @[Router.scala:125:13] wire _egress_unit_3_to_6_io_channel_status_0_occupied; // @[Router.scala:125:13] wire _egress_unit_3_to_6_io_out_valid; // @[Router.scala:125:13] wire _output_unit_2_to_11_io_credit_available_1; // @[Router.scala:122:13] wire _output_unit_2_to_11_io_channel_status_1_occupied; // @[Router.scala:122:13] wire _output_unit_1_to_6_io_credit_available_0; // @[Router.scala:122:13] wire _output_unit_1_to_6_io_channel_status_0_occupied; // @[Router.scala:122:13] wire _output_unit_0_to_3_io_credit_available_1; // @[Router.scala:122:13] wire _output_unit_0_to_3_io_channel_status_1_occupied; // @[Router.scala:122:13] wire _ingress_unit_2_from_6_io_vcalloc_req_valid; // @[Router.scala:116:13] wire _ingress_unit_2_from_6_io_vcalloc_req_bits_vc_sel_3_0; // @[Router.scala:116:13] wire _ingress_unit_2_from_6_io_vcalloc_req_bits_vc_sel_2_0; // @[Router.scala:116:13] wire _ingress_unit_2_from_6_io_vcalloc_req_bits_vc_sel_2_1; // @[Router.scala:116:13] wire _ingress_unit_2_from_6_io_vcalloc_req_bits_vc_sel_1_0; // @[Router.scala:116:13] wire _ingress_unit_2_from_6_io_vcalloc_req_bits_vc_sel_1_1; // @[Router.scala:116:13] wire _ingress_unit_2_from_6_io_vcalloc_req_bits_vc_sel_0_0; // @[Router.scala:116:13] wire _ingress_unit_2_from_6_io_vcalloc_req_bits_vc_sel_0_1; // @[Router.scala:116:13] wire 
_ingress_unit_2_from_6_io_salloc_req_0_valid; // @[Router.scala:116:13] wire _ingress_unit_2_from_6_io_salloc_req_0_bits_vc_sel_3_0; // @[Router.scala:116:13] wire _ingress_unit_2_from_6_io_salloc_req_0_bits_vc_sel_2_0; // @[Router.scala:116:13] wire _ingress_unit_2_from_6_io_salloc_req_0_bits_vc_sel_2_1; // @[Router.scala:116:13] wire _ingress_unit_2_from_6_io_salloc_req_0_bits_vc_sel_1_0; // @[Router.scala:116:13] wire _ingress_unit_2_from_6_io_salloc_req_0_bits_vc_sel_1_1; // @[Router.scala:116:13] wire _ingress_unit_2_from_6_io_salloc_req_0_bits_vc_sel_0_0; // @[Router.scala:116:13] wire _ingress_unit_2_from_6_io_salloc_req_0_bits_vc_sel_0_1; // @[Router.scala:116:13] wire _ingress_unit_2_from_6_io_salloc_req_0_bits_tail; // @[Router.scala:116:13] wire _ingress_unit_2_from_6_io_out_0_valid; // @[Router.scala:116:13] wire _ingress_unit_2_from_6_io_out_0_bits_flit_head; // @[Router.scala:116:13] wire _ingress_unit_2_from_6_io_out_0_bits_flit_tail; // @[Router.scala:116:13] wire [36:0] _ingress_unit_2_from_6_io_out_0_bits_flit_payload; // @[Router.scala:116:13] wire _ingress_unit_2_from_6_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:116:13] wire [3:0] _ingress_unit_2_from_6_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:116:13] wire [1:0] _ingress_unit_2_from_6_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:116:13] wire [3:0] _ingress_unit_2_from_6_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:116:13] wire [1:0] _ingress_unit_2_from_6_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:116:13] wire _ingress_unit_2_from_6_io_out_0_bits_out_virt_channel; // @[Router.scala:116:13] wire _ingress_unit_2_from_6_io_in_ready; // @[Router.scala:116:13] wire _input_unit_1_from_11_io_router_req_bits_src_virt_id; // @[Router.scala:112:13] wire _input_unit_1_from_11_io_router_req_bits_flow_vnet_id; // @[Router.scala:112:13] wire [3:0] _input_unit_1_from_11_io_router_req_bits_flow_ingress_node; // @[Router.scala:112:13] wire [1:0] _input_unit_1_from_11_io_router_req_bits_flow_ingress_node_id; // @[Router.scala:112:13] wire [3:0] _input_unit_1_from_11_io_router_req_bits_flow_egress_node; // @[Router.scala:112:13] wire [1:0] _input_unit_1_from_11_io_router_req_bits_flow_egress_node_id; // @[Router.scala:112:13] wire _input_unit_1_from_11_io_vcalloc_req_valid; // @[Router.scala:112:13] wire _input_unit_1_from_11_io_vcalloc_req_bits_vc_sel_3_0; // @[Router.scala:112:13] wire _input_unit_1_from_11_io_vcalloc_req_bits_vc_sel_2_0; // @[Router.scala:112:13] wire _input_unit_1_from_11_io_vcalloc_req_bits_vc_sel_2_1; // @[Router.scala:112:13] wire _input_unit_1_from_11_io_vcalloc_req_bits_vc_sel_1_0; // @[Router.scala:112:13] wire _input_unit_1_from_11_io_vcalloc_req_bits_vc_sel_1_1; // @[Router.scala:112:13] wire _input_unit_1_from_11_io_vcalloc_req_bits_vc_sel_0_0; // @[Router.scala:112:13] wire _input_unit_1_from_11_io_vcalloc_req_bits_vc_sel_0_1; // @[Router.scala:112:13] wire _input_unit_1_from_11_io_salloc_req_0_valid; // @[Router.scala:112:13] wire _input_unit_1_from_11_io_salloc_req_0_bits_vc_sel_3_0; // @[Router.scala:112:13] wire _input_unit_1_from_11_io_salloc_req_0_bits_vc_sel_2_1; // @[Router.scala:112:13] wire _input_unit_1_from_11_io_salloc_req_0_bits_vc_sel_0_1; // @[Router.scala:112:13] wire _input_unit_1_from_11_io_salloc_req_0_bits_tail; // @[Router.scala:112:13] wire _input_unit_1_from_11_io_out_0_valid; // @[Router.scala:112:13] wire _input_unit_1_from_11_io_out_0_bits_flit_head; // @[Router.scala:112:13] wire 
_input_unit_1_from_11_io_out_0_bits_flit_tail; // @[Router.scala:112:13] wire [36:0] _input_unit_1_from_11_io_out_0_bits_flit_payload; // @[Router.scala:112:13] wire _input_unit_1_from_11_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:112:13] wire [3:0] _input_unit_1_from_11_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:112:13] wire [1:0] _input_unit_1_from_11_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:112:13] wire [3:0] _input_unit_1_from_11_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:112:13] wire [1:0] _input_unit_1_from_11_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:112:13] wire _input_unit_1_from_11_io_out_0_bits_out_virt_channel; // @[Router.scala:112:13] wire _input_unit_0_from_6_io_router_req_bits_src_virt_id; // @[Router.scala:112:13] wire _input_unit_0_from_6_io_router_req_bits_flow_vnet_id; // @[Router.scala:112:13] wire [3:0] _input_unit_0_from_6_io_router_req_bits_flow_ingress_node; // @[Router.scala:112:13] wire [1:0] _input_unit_0_from_6_io_router_req_bits_flow_ingress_node_id; // @[Router.scala:112:13] wire [3:0] _input_unit_0_from_6_io_router_req_bits_flow_egress_node; // @[Router.scala:112:13] wire [1:0] _input_unit_0_from_6_io_router_req_bits_flow_egress_node_id; // @[Router.scala:112:13] wire _input_unit_0_from_6_io_vcalloc_req_valid; // @[Router.scala:112:13] wire _input_unit_0_from_6_io_vcalloc_req_bits_vc_sel_3_0; // @[Router.scala:112:13] wire _input_unit_0_from_6_io_vcalloc_req_bits_vc_sel_2_0; // @[Router.scala:112:13] wire _input_unit_0_from_6_io_vcalloc_req_bits_vc_sel_2_1; // @[Router.scala:112:13] wire _input_unit_0_from_6_io_vcalloc_req_bits_vc_sel_1_0; // @[Router.scala:112:13] wire _input_unit_0_from_6_io_vcalloc_req_bits_vc_sel_1_1; // @[Router.scala:112:13] wire _input_unit_0_from_6_io_vcalloc_req_bits_vc_sel_0_0; // @[Router.scala:112:13] wire _input_unit_0_from_6_io_vcalloc_req_bits_vc_sel_0_1; // @[Router.scala:112:13] wire _input_unit_0_from_6_io_salloc_req_0_valid; // @[Router.scala:112:13] wire _input_unit_0_from_6_io_salloc_req_0_bits_vc_sel_3_0; // @[Router.scala:112:13] wire _input_unit_0_from_6_io_salloc_req_0_bits_vc_sel_2_1; // @[Router.scala:112:13] wire _input_unit_0_from_6_io_salloc_req_0_bits_vc_sel_0_1; // @[Router.scala:112:13] wire _input_unit_0_from_6_io_salloc_req_0_bits_tail; // @[Router.scala:112:13] wire _input_unit_0_from_6_io_out_0_valid; // @[Router.scala:112:13] wire _input_unit_0_from_6_io_out_0_bits_flit_head; // @[Router.scala:112:13] wire _input_unit_0_from_6_io_out_0_bits_flit_tail; // @[Router.scala:112:13] wire [36:0] _input_unit_0_from_6_io_out_0_bits_flit_payload; // @[Router.scala:112:13] wire _input_unit_0_from_6_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:112:13] wire [3:0] _input_unit_0_from_6_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:112:13] wire [1:0] _input_unit_0_from_6_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:112:13] wire [3:0] _input_unit_0_from_6_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:112:13] wire [1:0] _input_unit_0_from_6_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:112:13] wire _input_unit_0_from_6_io_out_0_bits_out_virt_channel; // @[Router.scala:112:13] wire [1:0] fires_count = {1'h0, _vc_allocator_io_req_0_ready & _input_unit_0_from_6_io_vcalloc_req_valid} + {1'h0, _vc_allocator_io_req_1_ready & _input_unit_1_from_11_io_vcalloc_req_valid} + {1'h0, _vc_allocator_io_req_2_ready & _ingress_unit_2_from_6_io_vcalloc_req_valid}; // @[Decoupled.scala:51:35] reg [63:0] debug_tsc; // 
@[Router.scala:195:28] reg [63:0] debug_sample; // @[Router.scala:197:31] wire _GEN = debug_sample == {44'h0, _plusarg_reader_out - 20'h1}; // @[PlusArg.scala:80:11] reg [63:0] util_ctr; // @[Router.scala:203:29] reg fired; // @[Router.scala:204:26] wire _GEN_0 = (|_plusarg_reader_out) & _GEN; // @[PlusArg.scala:80:11] wire _GEN_1 = _GEN_0 & fired; // @[Router.scala:204:26, :207:{33,71}] reg [63:0] util_ctr_1; // @[Router.scala:203:29] reg fired_1; // @[Router.scala:204:26] wire _GEN_2 = _GEN_0 & fired_1; // @[Router.scala:204:26, :207:{33,71}] reg [63:0] util_ctr_2; // @[Router.scala:203:29] reg fired_2; // @[Router.scala:204:26] wire _GEN_3 = _GEN_0 & fired_2; // @[Router.scala:204:26, :207:{33,71}] reg [63:0] util_ctr_3; // @[Router.scala:203:29] reg fired_3; // @[Router.scala:204:26] wire _GEN_4 = _GEN_0 & fired_3; // @[Router.scala:204:26, :207:{33,71}]
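The registers declared above (debug_tsc, debug_sample, util_ctr_*, fired_*) implement a plusarg-driven utilization sampler. The following is only a rough Chisel sketch of that idiom, added for illustration; the module name UtilSampler, its ports, and the plusarg key "sample_period" are assumptions and do not come from the actual Router.scala source.

import chisel3._
import chisel3.util._
import freechips.rocketchip.util.PlusArg

// Hedged sketch of the sampling-counter pattern visible in the generated Verilog above.
class UtilSampler(nChannels: Int) extends Module {
  val fire = IO(Input(Vec(nChannels, Bool()))) // one "fired this cycle" strobe per channel (illustrative)

  // Free-running timestamp and a window counter, like debug_tsc / debug_sample above.
  val debug_tsc    = RegInit(0.U(64.W))
  val debug_sample = RegInit(0.U(64.W))
  debug_tsc    := debug_tsc + 1.U
  debug_sample := debug_sample + 1.U

  // Sample period read from a plusarg; a value of 0 disables reporting (hypothetical key name).
  val sample_period = PlusArg("sample_period", width = 20)
  val sample_now    = debug_sample === (sample_period - 1.U)
  when (sample_now) { debug_sample := 0.U }

  for (i <- 0 until nChannels) {
    val util_ctr = RegInit(0.U(64.W)) // counts fires per channel, like util_ctr_N above
    val fired    = RegInit(false.B)   // set when the channel fires, cleared at each sample point
    when (fire(i))    { util_ctr := util_ctr + 1.U; fired := true.B }
    when (sample_now) { fired := false.B }
    when (sample_period =/= 0.U && sample_now && fired) {
      printf("util %d: %d / %d cycles\n", i.U, util_ctr, debug_tsc)
    }
  }
}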
Generate the Verilog code corresponding to the following Chisel files. File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def 
validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File package.scala: // See LICENSE.SiFive for license details. 
package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. 
*/ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class 
OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Replacement.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. 
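(Aside, not part of Replacement.scala below: a small sketch exercising a few of the UInt extension methods that the package object above provides through implicit classes. The module and port names are illustrative assumptions.)

import chisel3._
import freechips.rocketchip.util._

// Demonstrates sextTo, rotateRight(UInt), and leftOR from the package object above.
class UIntOpsExample extends Module {
  val in      = IO(Input(UInt(8.W)))
  val amt     = IO(Input(UInt(3.W)))
  val sext16  = IO(Output(UInt(16.W)))
  val rotated = IO(Output(UInt(8.W)))
  val filled  = IO(Output(UInt(8.W)))

  sext16  := in.sextTo(16)        // sign-extend the 8-bit input to 16 bits
  rotated := in.rotateRight(amt)  // rotate right by a runtime amount
  filled  := leftOR(in)           // every bit at or above a set bit becomes 1
}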
package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import freechips.rocketchip.util.property.cover abstract class ReplacementPolicy { def nBits: Int def perSet: Boolean def way: UInt def miss: Unit def hit: Unit def access(touch_way: UInt): Unit def access(touch_ways: Seq[Valid[UInt]]): Unit def state_read: UInt def get_next_state(state: UInt, touch_way: UInt): UInt def get_next_state(state: UInt, touch_ways: Seq[Valid[UInt]]): UInt = { touch_ways.foldLeft(state)((prev, touch_way) => Mux(touch_way.valid, get_next_state(prev, touch_way.bits), prev)) } def get_replace_way(state: UInt): UInt } object ReplacementPolicy { def fromString(s: String, n_ways: Int): ReplacementPolicy = s.toLowerCase match { case "random" => new RandomReplacement(n_ways) case "lru" => new TrueLRU(n_ways) case "plru" => new PseudoLRU(n_ways) case t => throw new IllegalArgumentException(s"unknown Replacement Policy type $t") } } class RandomReplacement(n_ways: Int) extends ReplacementPolicy { private val replace = Wire(Bool()) replace := false.B def nBits = 16 def perSet = false private val lfsr = LFSR(nBits, replace) def state_read = WireDefault(lfsr) def way = Random(n_ways, lfsr) def miss = replace := true.B def hit = {} def access(touch_way: UInt) = {} def access(touch_ways: Seq[Valid[UInt]]) = {} def get_next_state(state: UInt, touch_way: UInt) = 0.U //DontCare def get_replace_way(state: UInt) = way } abstract class SeqReplacementPolicy { def access(set: UInt): Unit def update(valid: Bool, hit: Bool, set: UInt, way: UInt): Unit def way: UInt } abstract class SetAssocReplacementPolicy { def access(set: UInt, touch_way: UInt): Unit def access(sets: Seq[UInt], touch_ways: Seq[Valid[UInt]]): Unit def way(set: UInt): UInt } class SeqRandom(n_ways: Int) extends SeqReplacementPolicy { val logic = new RandomReplacement(n_ways) def access(set: UInt) = { } def update(valid: Bool, hit: Bool, set: UInt, way: UInt) = { when (valid && !hit) { logic.miss } } def way = logic.way } class TrueLRU(n_ways: Int) extends ReplacementPolicy { // True LRU replacement policy, using a triangular matrix to track which sets are more recently used than others. // The matrix is packed into a single UInt (or Bits). 
Example 4-way (6-bits): // [5] - 3 more recent than 2 // [4] - 3 more recent than 1 // [3] - 2 more recent than 1 // [2] - 3 more recent than 0 // [1] - 2 more recent than 0 // [0] - 1 more recent than 0 def nBits = (n_ways * (n_ways-1)) / 2 def perSet = true private val state_reg = RegInit(0.U(nBits.W)) def state_read = WireDefault(state_reg) private def extractMRUVec(state: UInt): Seq[UInt] = { // Extract per-way information about which higher-indexed ways are more recently used val moreRecentVec = Wire(Vec(n_ways-1, UInt(n_ways.W))) var lsb = 0 for (i <- 0 until n_ways-1) { moreRecentVec(i) := Cat(state(lsb+n_ways-i-2,lsb), 0.U((i+1).W)) lsb = lsb + (n_ways - i - 1) } moreRecentVec } def get_next_state(state: UInt, touch_way: UInt): UInt = { val nextState = Wire(Vec(n_ways-1, UInt(n_ways.W))) val moreRecentVec = extractMRUVec(state) // reconstruct lower triangular matrix val wayDec = UIntToOH(touch_way, n_ways) // Compute next value of triangular matrix // set the touched way as more recent than every other way nextState.zipWithIndex.map { case (e, i) => e := Mux(i.U === touch_way, 0.U(n_ways.W), moreRecentVec(i) | wayDec) } nextState.zipWithIndex.tail.foldLeft((nextState.head.apply(n_ways-1,1),0)) { case ((pe,pi),(ce,ci)) => (Cat(ce.apply(n_ways-1,ci+1), pe), ci) }._1 } def access(touch_way: UInt): Unit = { state_reg := get_next_state(state_reg, touch_way) } def access(touch_ways: Seq[Valid[UInt]]): Unit = { when (touch_ways.map(_.valid).orR) { state_reg := get_next_state(state_reg, touch_ways) } for (i <- 1 until touch_ways.size) { cover(PopCount(touch_ways.map(_.valid)) === i.U, s"LRU_UpdateCount$i", s"LRU Update $i simultaneous") } } def get_replace_way(state: UInt): UInt = { val moreRecentVec = extractMRUVec(state) // reconstruct lower triangular matrix // For each way, determine if all other ways are more recent val mruWayDec = (0 until n_ways).map { i => val upperMoreRecent = (if (i == n_ways-1) true.B else moreRecentVec(i).apply(n_ways-1,i+1).andR) val lowerMoreRecent = (if (i == 0) true.B else moreRecentVec.map(e => !e(i)).reduce(_ && _)) upperMoreRecent && lowerMoreRecent } OHToUInt(mruWayDec) } def way = get_replace_way(state_reg) def miss = access(way) def hit = {} @deprecated("replace 'replace' with 'way' from abstract class ReplacementPolicy","Rocket Chip 2020.05") def replace: UInt = way } class PseudoLRU(n_ways: Int) extends ReplacementPolicy { // Pseudo-LRU tree algorithm: https://en.wikipedia.org/wiki/Pseudo-LRU#Tree-PLRU // // // - bits storage example for 4-way PLRU binary tree: // bit[2]: ways 3+2 older than ways 1+0 // / \ // bit[1]: way 3 older than way 2 bit[0]: way 1 older than way 0 // // // - bits storage example for 3-way PLRU binary tree: // bit[1]: way 2 older than ways 1+0 // \ // bit[0]: way 1 older than way 0 // // // - bits storage example for 8-way PLRU binary tree: // bit[6]: ways 7-4 older than ways 3-0 // / \ // bit[5]: ways 7+6 > 5+4 bit[2]: ways 3+2 > 1+0 // / \ / \ // bit[4]: way 7>6 bit[3]: way 5>4 bit[1]: way 3>2 bit[0]: way 1>0 def nBits = n_ways - 1 def perSet = true private val state_reg = if (nBits == 0) Reg(UInt(0.W)) else RegInit(0.U(nBits.W)) def state_read = WireDefault(state_reg) def access(touch_way: UInt): Unit = { state_reg := get_next_state(state_reg, touch_way) } def access(touch_ways: Seq[Valid[UInt]]): Unit = { when (touch_ways.map(_.valid).orR) { state_reg := get_next_state(state_reg, touch_ways) } for (i <- 1 until touch_ways.size) { cover(PopCount(touch_ways.map(_.valid)) === i.U, s"PLRU_UpdateCount$i", s"PLRU Update $i 
simultaneous") } } /** @param state state_reg bits for this sub-tree * @param touch_way touched way encoded value bits for this sub-tree * @param tree_nways number of ways in this sub-tree */ def get_next_state(state: UInt, touch_way: UInt, tree_nways: Int): UInt = { require(state.getWidth == (tree_nways-1), s"wrong state bits width ${state.getWidth} for $tree_nways ways") require(touch_way.getWidth == (log2Ceil(tree_nways) max 1), s"wrong encoded way width ${touch_way.getWidth} for $tree_nways ways") if (tree_nways > 2) { // we are at a branching node in the tree, so recurse val right_nways: Int = 1 << (log2Ceil(tree_nways) - 1) // number of ways in the right sub-tree val left_nways: Int = tree_nways - right_nways // number of ways in the left sub-tree val set_left_older = !touch_way(log2Ceil(tree_nways)-1) val left_subtree_state = state.extract(tree_nways-3, right_nways-1) val right_subtree_state = state(right_nways-2, 0) if (left_nways > 1) { // we are at a branching node in the tree with both left and right sub-trees, so recurse both sub-trees Cat(set_left_older, Mux(set_left_older, left_subtree_state, // if setting left sub-tree as older, do NOT recurse into left sub-tree get_next_state(left_subtree_state, touch_way.extract(log2Ceil(left_nways)-1,0), left_nways)), // recurse left if newer Mux(set_left_older, get_next_state(right_subtree_state, touch_way(log2Ceil(right_nways)-1,0), right_nways), // recurse right if newer right_subtree_state)) // if setting right sub-tree as older, do NOT recurse into right sub-tree } else { // we are at a branching node in the tree with only a right sub-tree, so recurse only right sub-tree Cat(set_left_older, Mux(set_left_older, get_next_state(right_subtree_state, touch_way(log2Ceil(right_nways)-1,0), right_nways), // recurse right if newer right_subtree_state)) // if setting right sub-tree as older, do NOT recurse into right sub-tree } } else if (tree_nways == 2) { // we are at a leaf node at the end of the tree, so set the single state bit opposite of the lsb of the touched way encoded value !touch_way(0) } else { // tree_nways <= 1 // we are at an empty node in an empty tree for 1 way, so return single zero bit for Chisel (no zero-width wires) 0.U(1.W) } } def get_next_state(state: UInt, touch_way: UInt): UInt = { val touch_way_sized = if (touch_way.getWidth < log2Ceil(n_ways)) touch_way.padTo (log2Ceil(n_ways)) else touch_way.extract(log2Ceil(n_ways)-1,0) get_next_state(state, touch_way_sized, n_ways) } /** @param state state_reg bits for this sub-tree * @param tree_nways number of ways in this sub-tree */ def get_replace_way(state: UInt, tree_nways: Int): UInt = { require(state.getWidth == (tree_nways-1), s"wrong state bits width ${state.getWidth} for $tree_nways ways") // this algorithm recursively descends the binary tree, filling in the way-to-replace encoded value from msb to lsb if (tree_nways > 2) { // we are at a branching node in the tree, so recurse val right_nways: Int = 1 << (log2Ceil(tree_nways) - 1) // number of ways in the right sub-tree val left_nways: Int = tree_nways - right_nways // number of ways in the left sub-tree val left_subtree_older = state(tree_nways-2) val left_subtree_state = state.extract(tree_nways-3, right_nways-1) val right_subtree_state = state(right_nways-2, 0) if (left_nways > 1) { // we are at a branching node in the tree with both left and right sub-trees, so recurse both sub-trees Cat(left_subtree_older, // return the top state bit (current tree node) as msb of the way-to-replace encoded value 
Mux(left_subtree_older, // if left sub-tree is older, recurse left, else recurse right get_replace_way(left_subtree_state, left_nways), // recurse left get_replace_way(right_subtree_state, right_nways))) // recurse right } else { // we are at a branching node in the tree with only a right sub-tree, so recurse only right sub-tree Cat(left_subtree_older, // return the top state bit (current tree node) as msb of the way-to-replace encoded value Mux(left_subtree_older, // if left sub-tree is older, return and do not recurse right 0.U(1.W), get_replace_way(right_subtree_state, right_nways))) // recurse right } } else if (tree_nways == 2) { // we are at a leaf node at the end of the tree, so just return the single state bit as lsb of the way-to-replace encoded value state(0) } else { // tree_nways <= 1 // we are at an empty node in an unbalanced tree for non-power-of-2 ways, so return single zero bit as lsb of the way-to-replace encoded value 0.U(1.W) } } def get_replace_way(state: UInt): UInt = get_replace_way(state, n_ways) def way = get_replace_way(state_reg) def miss = access(way) def hit = {} } class SeqPLRU(n_sets: Int, n_ways: Int) extends SeqReplacementPolicy { val logic = new PseudoLRU(n_ways) val state = SyncReadMem(n_sets, UInt(logic.nBits.W)) val current_state = Wire(UInt(logic.nBits.W)) val next_state = Wire(UInt(logic.nBits.W)) val plru_way = logic.get_replace_way(current_state) def access(set: UInt) = { current_state := state.read(set) } def update(valid: Bool, hit: Bool, set: UInt, way: UInt) = { val update_way = Mux(hit, way, plru_way) next_state := logic.get_next_state(current_state, update_way) when (valid) { state.write(set, next_state) } } def way = plru_way } class SetAssocLRU(n_sets: Int, n_ways: Int, policy: String) extends SetAssocReplacementPolicy { val logic = policy.toLowerCase match { case "plru" => new PseudoLRU(n_ways) case "lru" => new TrueLRU(n_ways) case t => throw new IllegalArgumentException(s"unknown Replacement Policy type $t") } val state_vec = if (logic.nBits == 0) Reg(Vec(n_sets, UInt(logic.nBits.W))) // Work around elaboration error on following line else RegInit(VecInit(Seq.fill(n_sets)(0.U(logic.nBits.W)))) def access(set: UInt, touch_way: UInt) = { state_vec(set) := logic.get_next_state(state_vec(set), touch_way) } def access(sets: Seq[UInt], touch_ways: Seq[Valid[UInt]]) = { require(sets.size == touch_ways.size, "internal consistency check: should be same number of simultaneous updates for sets and touch_ways") for (set <- 0 until n_sets) { val set_touch_ways = (sets zip touch_ways).map { case (touch_set, touch_way) => Pipe(touch_way.valid && (touch_set === set.U), touch_way.bits, 0)} when (set_touch_ways.map(_.valid).orR) { state_vec(set) := logic.get_next_state(state_vec(set), set_touch_ways) } } } def way(set: UInt) = logic.get_replace_way(state_vec(set)) } // Synthesizable unit tests import freechips.rocketchip.unittest._ class PLRUTest(n_ways: Int, timeout: Int = 500) extends UnitTest(timeout) { val plru = new PseudoLRU(n_ways) // step io.finished := RegNext(true.B, false.B) val get_replace_ways = (0 until (1 << (n_ways-1))).map(state => plru.get_replace_way(state = state.U((n_ways-1).W))) val get_next_states = (0 until (1 << (n_ways-1))).map(state => (0 until n_ways).map(way => plru.get_next_state (state = state.U((n_ways-1).W), touch_way = way.U(log2Ceil(n_ways).W)))) n_ways match { case 2 => { assert(get_replace_ways(0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=0: expected=0 actual=%d", get_replace_ways(0)) 
assert(get_replace_ways(1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=1: expected=1 actual=%d", get_replace_ways(1)) assert(get_next_states(0)(0) === 1.U(plru.nBits.W), s"get_next_state state=0 way=0: expected=1 actual=%d", get_next_states(0)(0)) assert(get_next_states(0)(1) === 0.U(plru.nBits.W), s"get_next_state state=0 way=1: expected=0 actual=%d", get_next_states(0)(1)) assert(get_next_states(1)(0) === 1.U(plru.nBits.W), s"get_next_state state=1 way=0: expected=1 actual=%d", get_next_states(1)(0)) assert(get_next_states(1)(1) === 0.U(plru.nBits.W), s"get_next_state state=1 way=1: expected=0 actual=%d", get_next_states(1)(1)) } case 3 => { assert(get_replace_ways(0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=0: expected=0 actual=%d", get_replace_ways(0)) assert(get_replace_ways(1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=1: expected=1 actual=%d", get_replace_ways(1)) assert(get_replace_ways(2) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=2: expected=2 actual=%d", get_replace_ways(2)) assert(get_replace_ways(3) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=3: expected=2 actual=%d", get_replace_ways(3)) assert(get_next_states(0)(0) === 3.U(plru.nBits.W), s"get_next_state state=0 way=0: expected=3 actual=%d", get_next_states(0)(0)) assert(get_next_states(0)(1) === 2.U(plru.nBits.W), s"get_next_state state=0 way=1: expected=2 actual=%d", get_next_states(0)(1)) assert(get_next_states(0)(2) === 0.U(plru.nBits.W), s"get_next_state state=0 way=2: expected=0 actual=%d", get_next_states(0)(2)) assert(get_next_states(1)(0) === 3.U(plru.nBits.W), s"get_next_state state=1 way=0: expected=3 actual=%d", get_next_states(1)(0)) assert(get_next_states(1)(1) === 2.U(plru.nBits.W), s"get_next_state state=1 way=1: expected=2 actual=%d", get_next_states(1)(1)) assert(get_next_states(1)(2) === 1.U(plru.nBits.W), s"get_next_state state=1 way=2: expected=1 actual=%d", get_next_states(1)(2)) assert(get_next_states(2)(0) === 3.U(plru.nBits.W), s"get_next_state state=2 way=0: expected=3 actual=%d", get_next_states(2)(0)) assert(get_next_states(2)(1) === 2.U(plru.nBits.W), s"get_next_state state=2 way=1: expected=2 actual=%d", get_next_states(2)(1)) assert(get_next_states(2)(2) === 0.U(plru.nBits.W), s"get_next_state state=2 way=2: expected=0 actual=%d", get_next_states(2)(2)) assert(get_next_states(3)(0) === 3.U(plru.nBits.W), s"get_next_state state=3 way=0: expected=3 actual=%d", get_next_states(3)(0)) assert(get_next_states(3)(1) === 2.U(plru.nBits.W), s"get_next_state state=3 way=1: expected=2 actual=%d", get_next_states(3)(1)) assert(get_next_states(3)(2) === 1.U(plru.nBits.W), s"get_next_state state=3 way=2: expected=1 actual=%d", get_next_states(3)(2)) } case 4 => { assert(get_replace_ways(0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=0: expected=0 actual=%d", get_replace_ways(0)) assert(get_replace_ways(1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=1: expected=1 actual=%d", get_replace_ways(1)) assert(get_replace_ways(2) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=2: expected=0 actual=%d", get_replace_ways(2)) assert(get_replace_ways(3) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=3: expected=1 actual=%d", get_replace_ways(3)) assert(get_replace_ways(4) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=4: expected=2 actual=%d", get_replace_ways(4)) assert(get_replace_ways(5) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=5: expected=2 actual=%d", get_replace_ways(5)) assert(get_replace_ways(6) === 
3.U(log2Ceil(n_ways).W), s"get_replace_way state=6: expected=3 actual=%d", get_replace_ways(6)) assert(get_replace_ways(7) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=7: expected=3 actual=%d", get_replace_ways(7)) assert(get_next_states(0)(0) === 5.U(plru.nBits.W), s"get_next_state state=0 way=0: expected=5 actual=%d", get_next_states(0)(0)) assert(get_next_states(0)(1) === 4.U(plru.nBits.W), s"get_next_state state=0 way=1: expected=4 actual=%d", get_next_states(0)(1)) assert(get_next_states(0)(2) === 2.U(plru.nBits.W), s"get_next_state state=0 way=2: expected=2 actual=%d", get_next_states(0)(2)) assert(get_next_states(0)(3) === 0.U(plru.nBits.W), s"get_next_state state=0 way=3: expected=0 actual=%d", get_next_states(0)(3)) assert(get_next_states(1)(0) === 5.U(plru.nBits.W), s"get_next_state state=1 way=0: expected=5 actual=%d", get_next_states(1)(0)) assert(get_next_states(1)(1) === 4.U(plru.nBits.W), s"get_next_state state=1 way=1: expected=4 actual=%d", get_next_states(1)(1)) assert(get_next_states(1)(2) === 3.U(plru.nBits.W), s"get_next_state state=1 way=2: expected=3 actual=%d", get_next_states(1)(2)) assert(get_next_states(1)(3) === 1.U(plru.nBits.W), s"get_next_state state=1 way=3: expected=1 actual=%d", get_next_states(1)(3)) assert(get_next_states(2)(0) === 7.U(plru.nBits.W), s"get_next_state state=2 way=0: expected=7 actual=%d", get_next_states(2)(0)) assert(get_next_states(2)(1) === 6.U(plru.nBits.W), s"get_next_state state=2 way=1: expected=6 actual=%d", get_next_states(2)(1)) assert(get_next_states(2)(2) === 2.U(plru.nBits.W), s"get_next_state state=2 way=2: expected=2 actual=%d", get_next_states(2)(2)) assert(get_next_states(2)(3) === 0.U(plru.nBits.W), s"get_next_state state=2 way=3: expected=0 actual=%d", get_next_states(2)(3)) assert(get_next_states(3)(0) === 7.U(plru.nBits.W), s"get_next_state state=3 way=0: expected=7 actual=%d", get_next_states(3)(0)) assert(get_next_states(3)(1) === 6.U(plru.nBits.W), s"get_next_state state=3 way=1: expected=6 actual=%d", get_next_states(3)(1)) assert(get_next_states(3)(2) === 3.U(plru.nBits.W), s"get_next_state state=3 way=2: expected=3 actual=%d", get_next_states(3)(2)) assert(get_next_states(3)(3) === 1.U(plru.nBits.W), s"get_next_state state=3 way=3: expected=1 actual=%d", get_next_states(3)(3)) assert(get_next_states(4)(0) === 5.U(plru.nBits.W), s"get_next_state state=4 way=0: expected=5 actual=%d", get_next_states(4)(0)) assert(get_next_states(4)(1) === 4.U(plru.nBits.W), s"get_next_state state=4 way=1: expected=4 actual=%d", get_next_states(4)(1)) assert(get_next_states(4)(2) === 2.U(plru.nBits.W), s"get_next_state state=4 way=2: expected=2 actual=%d", get_next_states(4)(2)) assert(get_next_states(4)(3) === 0.U(plru.nBits.W), s"get_next_state state=4 way=3: expected=0 actual=%d", get_next_states(4)(3)) assert(get_next_states(5)(0) === 5.U(plru.nBits.W), s"get_next_state state=5 way=0: expected=5 actual=%d", get_next_states(5)(0)) assert(get_next_states(5)(1) === 4.U(plru.nBits.W), s"get_next_state state=5 way=1: expected=4 actual=%d", get_next_states(5)(1)) assert(get_next_states(5)(2) === 3.U(plru.nBits.W), s"get_next_state state=5 way=2: expected=3 actual=%d", get_next_states(5)(2)) assert(get_next_states(5)(3) === 1.U(plru.nBits.W), s"get_next_state state=5 way=3: expected=1 actual=%d", get_next_states(5)(3)) assert(get_next_states(6)(0) === 7.U(plru.nBits.W), s"get_next_state state=6 way=0: expected=7 actual=%d", get_next_states(6)(0)) assert(get_next_states(6)(1) === 6.U(plru.nBits.W), s"get_next_state state=6 
way=1: expected=6 actual=%d", get_next_states(6)(1)) assert(get_next_states(6)(2) === 2.U(plru.nBits.W), s"get_next_state state=6 way=2: expected=2 actual=%d", get_next_states(6)(2)) assert(get_next_states(6)(3) === 0.U(plru.nBits.W), s"get_next_state state=6 way=3: expected=0 actual=%d", get_next_states(6)(3)) assert(get_next_states(7)(0) === 7.U(plru.nBits.W), s"get_next_state state=7 way=0: expected=7 actual=%d", get_next_states(7)(0)) assert(get_next_states(7)(1) === 6.U(plru.nBits.W), s"get_next_state state=7 way=5: expected=6 actual=%d", get_next_states(7)(1)) assert(get_next_states(7)(2) === 3.U(plru.nBits.W), s"get_next_state state=7 way=2: expected=3 actual=%d", get_next_states(7)(2)) assert(get_next_states(7)(3) === 1.U(plru.nBits.W), s"get_next_state state=7 way=3: expected=1 actual=%d", get_next_states(7)(3)) } case 5 => { assert(get_replace_ways( 0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=00: expected=0 actual=%d", get_replace_ways( 0)) assert(get_replace_ways( 1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=01: expected=1 actual=%d", get_replace_ways( 1)) assert(get_replace_ways( 2) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=02: expected=0 actual=%d", get_replace_ways( 2)) assert(get_replace_ways( 3) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=03: expected=1 actual=%d", get_replace_ways( 3)) assert(get_replace_ways( 4) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=04: expected=2 actual=%d", get_replace_ways( 4)) assert(get_replace_ways( 5) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=05: expected=2 actual=%d", get_replace_ways( 5)) assert(get_replace_ways( 6) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=06: expected=3 actual=%d", get_replace_ways( 6)) assert(get_replace_ways( 7) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=07: expected=3 actual=%d", get_replace_ways( 7)) assert(get_replace_ways( 8) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=08: expected=4 actual=%d", get_replace_ways( 8)) assert(get_replace_ways( 9) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=09: expected=4 actual=%d", get_replace_ways( 9)) assert(get_replace_ways(10) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=10: expected=4 actual=%d", get_replace_ways(10)) assert(get_replace_ways(11) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=11: expected=4 actual=%d", get_replace_ways(11)) assert(get_replace_ways(12) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=12: expected=4 actual=%d", get_replace_ways(12)) assert(get_replace_ways(13) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=13: expected=4 actual=%d", get_replace_ways(13)) assert(get_replace_ways(14) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=14: expected=4 actual=%d", get_replace_ways(14)) assert(get_replace_ways(15) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=15: expected=4 actual=%d", get_replace_ways(15)) assert(get_next_states( 0)(0) === 13.U(plru.nBits.W), s"get_next_state state=00 way=0: expected=13 actual=%d", get_next_states( 0)(0)) assert(get_next_states( 0)(1) === 12.U(plru.nBits.W), s"get_next_state state=00 way=1: expected=12 actual=%d", get_next_states( 0)(1)) assert(get_next_states( 0)(2) === 10.U(plru.nBits.W), s"get_next_state state=00 way=2: expected=10 actual=%d", get_next_states( 0)(2)) assert(get_next_states( 0)(3) === 8.U(plru.nBits.W), s"get_next_state state=00 way=3: expected=08 actual=%d", get_next_states( 0)(3)) assert(get_next_states( 0)(4) === 0.U(plru.nBits.W), s"get_next_state state=00 way=4: 
expected=00 actual=%d", get_next_states( 0)(4)) assert(get_next_states( 1)(0) === 13.U(plru.nBits.W), s"get_next_state state=01 way=0: expected=13 actual=%d", get_next_states( 1)(0)) assert(get_next_states( 1)(1) === 12.U(plru.nBits.W), s"get_next_state state=01 way=1: expected=12 actual=%d", get_next_states( 1)(1)) assert(get_next_states( 1)(2) === 11.U(plru.nBits.W), s"get_next_state state=01 way=2: expected=11 actual=%d", get_next_states( 1)(2)) assert(get_next_states( 1)(3) === 9.U(plru.nBits.W), s"get_next_state state=01 way=3: expected=09 actual=%d", get_next_states( 1)(3)) assert(get_next_states( 1)(4) === 1.U(plru.nBits.W), s"get_next_state state=01 way=4: expected=01 actual=%d", get_next_states( 1)(4)) assert(get_next_states( 2)(0) === 15.U(plru.nBits.W), s"get_next_state state=02 way=0: expected=15 actual=%d", get_next_states( 2)(0)) assert(get_next_states( 2)(1) === 14.U(plru.nBits.W), s"get_next_state state=02 way=1: expected=14 actual=%d", get_next_states( 2)(1)) assert(get_next_states( 2)(2) === 10.U(plru.nBits.W), s"get_next_state state=02 way=2: expected=10 actual=%d", get_next_states( 2)(2)) assert(get_next_states( 2)(3) === 8.U(plru.nBits.W), s"get_next_state state=02 way=3: expected=08 actual=%d", get_next_states( 2)(3)) assert(get_next_states( 2)(4) === 2.U(plru.nBits.W), s"get_next_state state=02 way=4: expected=02 actual=%d", get_next_states( 2)(4)) assert(get_next_states( 3)(0) === 15.U(plru.nBits.W), s"get_next_state state=03 way=0: expected=15 actual=%d", get_next_states( 3)(0)) assert(get_next_states( 3)(1) === 14.U(plru.nBits.W), s"get_next_state state=03 way=1: expected=14 actual=%d", get_next_states( 3)(1)) assert(get_next_states( 3)(2) === 11.U(plru.nBits.W), s"get_next_state state=03 way=2: expected=11 actual=%d", get_next_states( 3)(2)) assert(get_next_states( 3)(3) === 9.U(plru.nBits.W), s"get_next_state state=03 way=3: expected=09 actual=%d", get_next_states( 3)(3)) assert(get_next_states( 3)(4) === 3.U(plru.nBits.W), s"get_next_state state=03 way=4: expected=03 actual=%d", get_next_states( 3)(4)) assert(get_next_states( 4)(0) === 13.U(plru.nBits.W), s"get_next_state state=04 way=0: expected=13 actual=%d", get_next_states( 4)(0)) assert(get_next_states( 4)(1) === 12.U(plru.nBits.W), s"get_next_state state=04 way=1: expected=12 actual=%d", get_next_states( 4)(1)) assert(get_next_states( 4)(2) === 10.U(plru.nBits.W), s"get_next_state state=04 way=2: expected=10 actual=%d", get_next_states( 4)(2)) assert(get_next_states( 4)(3) === 8.U(plru.nBits.W), s"get_next_state state=04 way=3: expected=08 actual=%d", get_next_states( 4)(3)) assert(get_next_states( 4)(4) === 4.U(plru.nBits.W), s"get_next_state state=04 way=4: expected=04 actual=%d", get_next_states( 4)(4)) assert(get_next_states( 5)(0) === 13.U(plru.nBits.W), s"get_next_state state=05 way=0: expected=13 actual=%d", get_next_states( 5)(0)) assert(get_next_states( 5)(1) === 12.U(plru.nBits.W), s"get_next_state state=05 way=1: expected=12 actual=%d", get_next_states( 5)(1)) assert(get_next_states( 5)(2) === 11.U(plru.nBits.W), s"get_next_state state=05 way=2: expected=11 actual=%d", get_next_states( 5)(2)) assert(get_next_states( 5)(3) === 9.U(plru.nBits.W), s"get_next_state state=05 way=3: expected=09 actual=%d", get_next_states( 5)(3)) assert(get_next_states( 5)(4) === 5.U(plru.nBits.W), s"get_next_state state=05 way=4: expected=05 actual=%d", get_next_states( 5)(4)) assert(get_next_states( 6)(0) === 15.U(plru.nBits.W), s"get_next_state state=06 way=0: expected=15 actual=%d", get_next_states( 6)(0)) 
assert(get_next_states( 6)(1) === 14.U(plru.nBits.W), s"get_next_state state=06 way=1: expected=14 actual=%d", get_next_states( 6)(1)) assert(get_next_states( 6)(2) === 10.U(plru.nBits.W), s"get_next_state state=06 way=2: expected=10 actual=%d", get_next_states( 6)(2)) assert(get_next_states( 6)(3) === 8.U(plru.nBits.W), s"get_next_state state=06 way=3: expected=08 actual=%d", get_next_states( 6)(3)) assert(get_next_states( 6)(4) === 6.U(plru.nBits.W), s"get_next_state state=06 way=4: expected=06 actual=%d", get_next_states( 6)(4)) assert(get_next_states( 7)(0) === 15.U(plru.nBits.W), s"get_next_state state=07 way=0: expected=15 actual=%d", get_next_states( 7)(0)) assert(get_next_states( 7)(1) === 14.U(plru.nBits.W), s"get_next_state state=07 way=5: expected=14 actual=%d", get_next_states( 7)(1)) assert(get_next_states( 7)(2) === 11.U(plru.nBits.W), s"get_next_state state=07 way=2: expected=11 actual=%d", get_next_states( 7)(2)) assert(get_next_states( 7)(3) === 9.U(plru.nBits.W), s"get_next_state state=07 way=3: expected=09 actual=%d", get_next_states( 7)(3)) assert(get_next_states( 7)(4) === 7.U(plru.nBits.W), s"get_next_state state=07 way=4: expected=07 actual=%d", get_next_states( 7)(4)) assert(get_next_states( 8)(0) === 13.U(plru.nBits.W), s"get_next_state state=08 way=0: expected=13 actual=%d", get_next_states( 8)(0)) assert(get_next_states( 8)(1) === 12.U(plru.nBits.W), s"get_next_state state=08 way=1: expected=12 actual=%d", get_next_states( 8)(1)) assert(get_next_states( 8)(2) === 10.U(plru.nBits.W), s"get_next_state state=08 way=2: expected=10 actual=%d", get_next_states( 8)(2)) assert(get_next_states( 8)(3) === 8.U(plru.nBits.W), s"get_next_state state=08 way=3: expected=08 actual=%d", get_next_states( 8)(3)) assert(get_next_states( 8)(4) === 0.U(plru.nBits.W), s"get_next_state state=08 way=4: expected=00 actual=%d", get_next_states( 8)(4)) assert(get_next_states( 9)(0) === 13.U(plru.nBits.W), s"get_next_state state=09 way=0: expected=13 actual=%d", get_next_states( 9)(0)) assert(get_next_states( 9)(1) === 12.U(plru.nBits.W), s"get_next_state state=09 way=1: expected=12 actual=%d", get_next_states( 9)(1)) assert(get_next_states( 9)(2) === 11.U(plru.nBits.W), s"get_next_state state=09 way=2: expected=11 actual=%d", get_next_states( 9)(2)) assert(get_next_states( 9)(3) === 9.U(plru.nBits.W), s"get_next_state state=09 way=3: expected=09 actual=%d", get_next_states( 9)(3)) assert(get_next_states( 9)(4) === 1.U(plru.nBits.W), s"get_next_state state=09 way=4: expected=01 actual=%d", get_next_states( 9)(4)) assert(get_next_states(10)(0) === 15.U(plru.nBits.W), s"get_next_state state=10 way=0: expected=15 actual=%d", get_next_states(10)(0)) assert(get_next_states(10)(1) === 14.U(plru.nBits.W), s"get_next_state state=10 way=1: expected=14 actual=%d", get_next_states(10)(1)) assert(get_next_states(10)(2) === 10.U(plru.nBits.W), s"get_next_state state=10 way=2: expected=10 actual=%d", get_next_states(10)(2)) assert(get_next_states(10)(3) === 8.U(plru.nBits.W), s"get_next_state state=10 way=3: expected=08 actual=%d", get_next_states(10)(3)) assert(get_next_states(10)(4) === 2.U(plru.nBits.W), s"get_next_state state=10 way=4: expected=02 actual=%d", get_next_states(10)(4)) assert(get_next_states(11)(0) === 15.U(plru.nBits.W), s"get_next_state state=11 way=0: expected=15 actual=%d", get_next_states(11)(0)) assert(get_next_states(11)(1) === 14.U(plru.nBits.W), s"get_next_state state=11 way=1: expected=14 actual=%d", get_next_states(11)(1)) assert(get_next_states(11)(2) === 11.U(plru.nBits.W), 
s"get_next_state state=11 way=2: expected=11 actual=%d", get_next_states(11)(2)) assert(get_next_states(11)(3) === 9.U(plru.nBits.W), s"get_next_state state=11 way=3: expected=09 actual=%d", get_next_states(11)(3)) assert(get_next_states(11)(4) === 3.U(plru.nBits.W), s"get_next_state state=11 way=4: expected=03 actual=%d", get_next_states(11)(4)) assert(get_next_states(12)(0) === 13.U(plru.nBits.W), s"get_next_state state=12 way=0: expected=13 actual=%d", get_next_states(12)(0)) assert(get_next_states(12)(1) === 12.U(plru.nBits.W), s"get_next_state state=12 way=1: expected=12 actual=%d", get_next_states(12)(1)) assert(get_next_states(12)(2) === 10.U(plru.nBits.W), s"get_next_state state=12 way=2: expected=10 actual=%d", get_next_states(12)(2)) assert(get_next_states(12)(3) === 8.U(plru.nBits.W), s"get_next_state state=12 way=3: expected=08 actual=%d", get_next_states(12)(3)) assert(get_next_states(12)(4) === 4.U(plru.nBits.W), s"get_next_state state=12 way=4: expected=04 actual=%d", get_next_states(12)(4)) assert(get_next_states(13)(0) === 13.U(plru.nBits.W), s"get_next_state state=13 way=0: expected=13 actual=%d", get_next_states(13)(0)) assert(get_next_states(13)(1) === 12.U(plru.nBits.W), s"get_next_state state=13 way=1: expected=12 actual=%d", get_next_states(13)(1)) assert(get_next_states(13)(2) === 11.U(plru.nBits.W), s"get_next_state state=13 way=2: expected=11 actual=%d", get_next_states(13)(2)) assert(get_next_states(13)(3) === 9.U(plru.nBits.W), s"get_next_state state=13 way=3: expected=09 actual=%d", get_next_states(13)(3)) assert(get_next_states(13)(4) === 5.U(plru.nBits.W), s"get_next_state state=13 way=4: expected=05 actual=%d", get_next_states(13)(4)) assert(get_next_states(14)(0) === 15.U(plru.nBits.W), s"get_next_state state=14 way=0: expected=15 actual=%d", get_next_states(14)(0)) assert(get_next_states(14)(1) === 14.U(plru.nBits.W), s"get_next_state state=14 way=1: expected=14 actual=%d", get_next_states(14)(1)) assert(get_next_states(14)(2) === 10.U(plru.nBits.W), s"get_next_state state=14 way=2: expected=10 actual=%d", get_next_states(14)(2)) assert(get_next_states(14)(3) === 8.U(plru.nBits.W), s"get_next_state state=14 way=3: expected=08 actual=%d", get_next_states(14)(3)) assert(get_next_states(14)(4) === 6.U(plru.nBits.W), s"get_next_state state=14 way=4: expected=06 actual=%d", get_next_states(14)(4)) assert(get_next_states(15)(0) === 15.U(plru.nBits.W), s"get_next_state state=15 way=0: expected=15 actual=%d", get_next_states(15)(0)) assert(get_next_states(15)(1) === 14.U(plru.nBits.W), s"get_next_state state=15 way=5: expected=14 actual=%d", get_next_states(15)(1)) assert(get_next_states(15)(2) === 11.U(plru.nBits.W), s"get_next_state state=15 way=2: expected=11 actual=%d", get_next_states(15)(2)) assert(get_next_states(15)(3) === 9.U(plru.nBits.W), s"get_next_state state=15 way=3: expected=09 actual=%d", get_next_states(15)(3)) assert(get_next_states(15)(4) === 7.U(plru.nBits.W), s"get_next_state state=15 way=4: expected=07 actual=%d", get_next_states(15)(4)) } case 6 => { assert(get_replace_ways( 0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=00: expected=0 actual=%d", get_replace_ways( 0)) assert(get_replace_ways( 1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=01: expected=1 actual=%d", get_replace_ways( 1)) assert(get_replace_ways( 2) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=02: expected=0 actual=%d", get_replace_ways( 2)) assert(get_replace_ways( 3) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=03: expected=1 actual=%d", 
get_replace_ways( 3)) assert(get_replace_ways( 4) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=04: expected=2 actual=%d", get_replace_ways( 4)) assert(get_replace_ways( 5) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=05: expected=2 actual=%d", get_replace_ways( 5)) assert(get_replace_ways( 6) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=06: expected=3 actual=%d", get_replace_ways( 6)) assert(get_replace_ways( 7) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=07: expected=3 actual=%d", get_replace_ways( 7)) assert(get_replace_ways( 8) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=08: expected=0 actual=%d", get_replace_ways( 8)) assert(get_replace_ways( 9) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=09: expected=1 actual=%d", get_replace_ways( 9)) assert(get_replace_ways(10) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=10: expected=0 actual=%d", get_replace_ways(10)) assert(get_replace_ways(11) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=11: expected=1 actual=%d", get_replace_ways(11)) assert(get_replace_ways(12) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=12: expected=2 actual=%d", get_replace_ways(12)) assert(get_replace_ways(13) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=13: expected=2 actual=%d", get_replace_ways(13)) assert(get_replace_ways(14) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=14: expected=3 actual=%d", get_replace_ways(14)) assert(get_replace_ways(15) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=15: expected=3 actual=%d", get_replace_ways(15)) assert(get_replace_ways(16) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=16: expected=4 actual=%d", get_replace_ways(16)) assert(get_replace_ways(17) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=17: expected=4 actual=%d", get_replace_ways(17)) assert(get_replace_ways(18) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=18: expected=4 actual=%d", get_replace_ways(18)) assert(get_replace_ways(19) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=19: expected=4 actual=%d", get_replace_ways(19)) assert(get_replace_ways(20) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=20: expected=4 actual=%d", get_replace_ways(20)) assert(get_replace_ways(21) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=21: expected=4 actual=%d", get_replace_ways(21)) assert(get_replace_ways(22) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=22: expected=4 actual=%d", get_replace_ways(22)) assert(get_replace_ways(23) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=23: expected=4 actual=%d", get_replace_ways(23)) assert(get_replace_ways(24) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=24: expected=5 actual=%d", get_replace_ways(24)) assert(get_replace_ways(25) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=25: expected=5 actual=%d", get_replace_ways(25)) assert(get_replace_ways(26) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=26: expected=5 actual=%d", get_replace_ways(26)) assert(get_replace_ways(27) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=27: expected=5 actual=%d", get_replace_ways(27)) assert(get_replace_ways(28) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=28: expected=5 actual=%d", get_replace_ways(28)) assert(get_replace_ways(29) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=29: expected=5 actual=%d", get_replace_ways(29)) assert(get_replace_ways(30) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=30: expected=5 actual=%d", get_replace_ways(30)) 
assert(get_replace_ways(31) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=31: expected=5 actual=%d", get_replace_ways(31)) } case _ => throw new IllegalArgumentException(s"no test pattern found for n_ways=$n_ways") } } File HellaCache.scala: // See LICENSE.SiFive for license details. // See LICENSE.Berkeley for license details. package freechips.rocketchip.rocket import chisel3.{dontTouch, _} import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.bundlebridge._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.amba.AMBAProtField import freechips.rocketchip.diplomacy.{IdRange, TransferSizes, RegionType} import freechips.rocketchip.tile.{L1CacheParams, HasL1CacheParameters, HasCoreParameters, CoreBundle, HasNonDiplomaticTileParameters, BaseTile, HasTileParameters} import freechips.rocketchip.tilelink.{TLMasterParameters, TLClientNode, TLMasterPortParameters, TLEdgeOut, TLWidthWidget, TLFIFOFixer, ClientMetadata} import freechips.rocketchip.util.{Code, RandomReplacement, ParameterizedBundle} import freechips.rocketchip.util.{BooleanToAugmentedBoolean, IntToAugmentedInt} import scala.collection.mutable.ListBuffer case class DCacheParams( nSets: Int = 64, nWays: Int = 4, rowBits: Int = 64, subWordBits: Option[Int] = None, replacementPolicy: String = "random", nTLBSets: Int = 1, nTLBWays: Int = 32, nTLBBasePageSectors: Int = 4, nTLBSuperpages: Int = 4, tagECC: Option[String] = None, dataECC: Option[String] = None, dataECCBytes: Int = 1, nMSHRs: Int = 1, nSDQ: Int = 17, nRPQ: Int = 16, nMMIOs: Int = 1, blockBytes: Int = 64, separateUncachedResp: Boolean = false, acquireBeforeRelease: Boolean = false, pipelineWayMux: Boolean = false, clockGate: Boolean = false, scratch: Option[BigInt] = None) extends L1CacheParams { def tagCode: Code = Code.fromString(tagECC) def dataCode: Code = Code.fromString(dataECC) def dataScratchpadBytes: Int = scratch.map(_ => nSets*blockBytes).getOrElse(0) def replacement = new RandomReplacement(nWays) def silentDrop: Boolean = !acquireBeforeRelease require((!scratch.isDefined || nWays == 1), "Scratchpad only allowed in direct-mapped cache.") require((!scratch.isDefined || nMSHRs == 0), "Scratchpad only allowed in blocking cache.") if (scratch.isEmpty) require(isPow2(nSets), s"nSets($nSets) must be pow2") } trait HasL1HellaCacheParameters extends HasL1CacheParameters with HasCoreParameters { val cacheParams = tileParams.dcache.get val cfg = cacheParams def wordBits = coreDataBits def wordBytes = coreDataBytes def subWordBits = cacheParams.subWordBits.getOrElse(wordBits) def subWordBytes = subWordBits / 8 def wordOffBits = log2Up(wordBytes) def beatBytes = cacheBlockBytes / cacheDataBeats def beatWords = beatBytes / wordBytes def beatOffBits = log2Up(beatBytes) def idxMSB = untagBits-1 def idxLSB = blockOffBits def offsetmsb = idxLSB-1 def offsetlsb = wordOffBits def rowWords = rowBits/wordBits def doNarrowRead = coreDataBits * nWays % rowBits == 0 def eccBytes = cacheParams.dataECCBytes val eccBits = cacheParams.dataECCBytes * 8 val encBits = cacheParams.dataCode.width(eccBits) val encWordBits = encBits * (wordBits / eccBits) def encDataBits = cacheParams.dataCode.width(coreDataBits) // NBDCache only def encRowBits = encDataBits*rowWords def lrscCycles = coreParams.lrscCycles // ISA requires 16-insn LRSC sequences to succeed def lrscBackoff = 3 // disallow LRSC reacquisition briefly def blockProbeAfterGrantCycles = 8 // give the processor some time to issue a request after a grant def nIOMSHRs = 
cacheParams.nMMIOs def maxUncachedInFlight = cacheParams.nMMIOs def dataScratchpadSize = cacheParams.dataScratchpadBytes require(rowBits >= coreDataBits, s"rowBits($rowBits) < coreDataBits($coreDataBits)") if (!usingDataScratchpad) require(rowBits == cacheDataBits, s"rowBits($rowBits) != cacheDataBits($cacheDataBits)") // would need offset addr for puts if data width < xlen require(xLen <= cacheDataBits, s"xLen($xLen) > cacheDataBits($cacheDataBits)") } abstract class L1HellaCacheModule(implicit val p: Parameters) extends Module with HasL1HellaCacheParameters abstract class L1HellaCacheBundle(implicit val p: Parameters) extends ParameterizedBundle()(p) with HasL1HellaCacheParameters /** Bundle definitions for HellaCache interfaces */ trait HasCoreMemOp extends HasL1HellaCacheParameters { val addr = UInt(coreMaxAddrBits.W) val idx = (usingVM && untagBits > pgIdxBits).option(UInt(coreMaxAddrBits.W)) val tag = UInt((coreParams.dcacheReqTagBits + log2Ceil(dcacheArbPorts)).W) val cmd = UInt(M_SZ.W) val size = UInt(log2Ceil(coreDataBytes.log2 + 1).W) val signed = Bool() val dprv = UInt(PRV.SZ.W) val dv = Bool() } trait HasCoreData extends HasCoreParameters { val data = UInt(coreDataBits.W) val mask = UInt(coreDataBytes.W) } class HellaCacheReqInternal(implicit p: Parameters) extends CoreBundle()(p) with HasCoreMemOp { val phys = Bool() val no_resp = Bool() // The dcache may omit generating a response for this request val no_alloc = Bool() val no_xcpt = Bool() } class HellaCacheReq(implicit p: Parameters) extends HellaCacheReqInternal()(p) with HasCoreData class HellaCacheResp(implicit p: Parameters) extends CoreBundle()(p) with HasCoreMemOp with HasCoreData { val replay = Bool() val has_data = Bool() val data_word_bypass = UInt(coreDataBits.W) val data_raw = UInt(coreDataBits.W) val store_data = UInt(coreDataBits.W) } class AlignmentExceptions extends Bundle { val ld = Bool() val st = Bool() } class HellaCacheExceptions extends Bundle { val ma = new AlignmentExceptions val pf = new AlignmentExceptions val gf = new AlignmentExceptions val ae = new AlignmentExceptions } class HellaCacheWriteData(implicit p: Parameters) extends CoreBundle()(p) with HasCoreData class HellaCachePerfEvents extends Bundle { val acquire = Bool() val release = Bool() val grant = Bool() val tlbMiss = Bool() val blocked = Bool() val canAcceptStoreThenLoad = Bool() val canAcceptStoreThenRMW = Bool() val canAcceptLoadThenLoad = Bool() val storeBufferEmptyAfterLoad = Bool() val storeBufferEmptyAfterStore = Bool() } // interface between D$ and processor/DTLB class HellaCacheIO(implicit p: Parameters) extends CoreBundle()(p) { val req = Decoupled(new HellaCacheReq) val s1_kill = Output(Bool()) // kill previous cycle's req val s1_data = Output(new HellaCacheWriteData()) // data for previous cycle's req val s2_nack = Input(Bool()) // req from two cycles ago is rejected val s2_nack_cause_raw = Input(Bool()) // reason for nack is store-load RAW hazard (performance hint) val s2_kill = Output(Bool()) // kill req from two cycles ago val s2_uncached = Input(Bool()) // advisory signal that the access is MMIO val s2_paddr = Input(UInt(paddrBits.W)) // translated address val resp = Flipped(Valid(new HellaCacheResp)) val replay_next = Input(Bool()) val s2_xcpt = Input(new HellaCacheExceptions) val s2_gpa = Input(UInt(vaddrBitsExtended.W)) val s2_gpa_is_pte = Input(Bool()) val uncached_resp = tileParams.dcache.get.separateUncachedResp.option(Flipped(Decoupled(new HellaCacheResp))) val ordered = Input(Bool()) val store_pending = 
Input(Bool()) // there is a store in a store buffer somewhere val perf = Input(new HellaCachePerfEvents()) val keep_clock_enabled = Output(Bool()) // should D$ avoid clock-gating itself? val clock_enabled = Input(Bool()) // is D$ currently being clocked? } /** Base classes for Diplomatic TL2 HellaCaches */ abstract class HellaCache(tileId: Int)(implicit p: Parameters) extends LazyModule with HasNonDiplomaticTileParameters { protected val cfg = tileParams.dcache.get protected def cacheClientParameters = cfg.scratch.map(x => Seq()).getOrElse(Seq(TLMasterParameters.v1( name = s"Core ${tileId} DCache", sourceId = IdRange(0, 1 max cfg.nMSHRs), supportsProbe = TransferSizes(cfg.blockBytes, cfg.blockBytes)))) protected def mmioClientParameters = Seq(TLMasterParameters.v1( name = s"Core ${tileId} DCache MMIO", sourceId = IdRange(firstMMIO, firstMMIO + cfg.nMMIOs), requestFifo = true)) def firstMMIO = (cacheClientParameters.map(_.sourceId.end) :+ 0).max val node = TLClientNode(Seq(TLMasterPortParameters.v1( clients = cacheClientParameters ++ mmioClientParameters, minLatency = 1, requestFields = tileParams.core.useVM.option(Seq()).getOrElse(Seq(AMBAProtField()))))) val hartIdSinkNodeOpt = cfg.scratch.map(_ => BundleBridgeSink[UInt]()) val mmioAddressPrefixSinkNodeOpt = cfg.scratch.map(_ => BundleBridgeSink[UInt]()) val module: HellaCacheModule def flushOnFenceI = cfg.scratch.isEmpty && !node.edges.out(0).manager.managers.forall(m => !m.supportsAcquireB || !m.executable || m.regionType >= RegionType.TRACKED || m.regionType <= RegionType.IDEMPOTENT) def canSupportCFlushLine = !usingVM || cfg.blockBytes * cfg.nSets <= (1 << pgIdxBits) require(!tileParams.core.haveCFlush || cfg.scratch.isEmpty, "CFLUSH_D_L1 instruction requires a D$") } class HellaCacheBundle(implicit p: Parameters) extends CoreBundle()(p) { val cpu = Flipped(new HellaCacheIO) val ptw = new TLBPTWIO() val errors = new DCacheErrors val tlb_port = new DCacheTLBPort } class HellaCacheModule(outer: HellaCache) extends LazyModuleImp(outer) with HasL1HellaCacheParameters { implicit val edge: TLEdgeOut = outer.node.edges.out(0) val (tl_out, _) = outer.node.out(0) val io = IO(new HellaCacheBundle) val io_hartid = outer.hartIdSinkNodeOpt.map(_.bundle) val io_mmio_address_prefix = outer.mmioAddressPrefixSinkNodeOpt.map(_.bundle) dontTouch(io.cpu.resp) // Users like to monitor these fields even if the core ignores some signals dontTouch(io.cpu.s1_data) require(rowBits == edge.bundle.dataBits) private val fifoManagers = edge.manager.managers.filter(TLFIFOFixer.allVolatile) fifoManagers.foreach { m => require (m.fifoId == fifoManagers.head.fifoId, s"IOMSHRs must be FIFO for all regions with effects, but HellaCache sees\n"+ s"${m.nodePath.map(_.name)}\nversus\n${fifoManagers.head.nodePath.map(_.name)}") } } /** Support overriding which HellaCache is instantiated */ case object BuildHellaCache extends Field[BaseTile => Parameters => HellaCache](HellaCacheFactory.apply) object HellaCacheFactory { def apply(tile: BaseTile)(p: Parameters): HellaCache = { if (tile.tileParams.dcache.get.nMSHRs == 0) new DCache(tile.tileId, tile.crossing)(p) else new NonBlockingDCache(tile.tileId)(p) } } /** Mix-ins for constructing tiles that have a HellaCache */ trait HasHellaCache { this: BaseTile => val module: HasHellaCacheModule implicit val p: Parameters var nDCachePorts = 0 lazy val dcache: HellaCache = LazyModule(p(BuildHellaCache)(this)(p)) tlMasterXbar.node := TLWidthWidget(tileParams.dcache.get.rowBits/8) := dcache.node dcache.hartIdSinkNodeOpt.map { _ := 
hartIdNexusNode } dcache.mmioAddressPrefixSinkNodeOpt.map { _ := mmioAddressPrefixNexusNode } InModuleBody { dcache.module.io.tlb_port := DontCare } } trait HasHellaCacheModule { val outer: HasHellaCache with HasTileParameters implicit val p: Parameters val dcachePorts = ListBuffer[HellaCacheIO]() val dcacheArb = Module(new HellaCacheArbiter(outer.nDCachePorts)(outer.p)) outer.dcache.module.io.cpu <> dcacheArb.io.mem } /** Metadata array used for all HellaCaches */ class L1Metadata(implicit p: Parameters) extends L1HellaCacheBundle()(p) { val coh = new ClientMetadata val tag = UInt(tagBits.W) } object L1Metadata { def apply(tag: Bits, coh: ClientMetadata)(implicit p: Parameters) = { val meta = Wire(new L1Metadata) meta.tag := tag meta.coh := coh meta } } class L1MetaReadReq(implicit p: Parameters) extends L1HellaCacheBundle()(p) { val idx = UInt(idxBits.W) val way_en = UInt(nWays.W) val tag = UInt(tagBits.W) } class L1MetaWriteReq(implicit p: Parameters) extends L1MetaReadReq()(p) { val data = new L1Metadata } class L1MetadataArray[T <: L1Metadata](onReset: () => T)(implicit p: Parameters) extends L1HellaCacheModule()(p) { val rstVal = onReset() val io = IO(new Bundle { val read = Flipped(Decoupled(new L1MetaReadReq)) val write = Flipped(Decoupled(new L1MetaWriteReq)) val resp = Output(Vec(nWays, rstVal.cloneType)) }) val rst_cnt = RegInit(0.U(log2Up(nSets+1).W)) val rst = rst_cnt < nSets.U val waddr = Mux(rst, rst_cnt, io.write.bits.idx) val wdata = Mux(rst, rstVal, io.write.bits.data).asUInt val wmask = Mux(rst || (nWays == 1).B, (-1).S, io.write.bits.way_en.asSInt).asBools val rmask = Mux(rst || (nWays == 1).B, (-1).S, io.read.bits.way_en.asSInt).asBools when (rst) { rst_cnt := rst_cnt+1.U } val metabits = rstVal.getWidth val tag_array = SyncReadMem(nSets, Vec(nWays, UInt(metabits.W))) val wen = rst || io.write.valid when (wen) { tag_array.write(waddr, VecInit.fill(nWays)(wdata), wmask) } io.resp := tag_array.read(io.read.bits.idx, io.read.fire).map(_.asTypeOf(chiselTypeOf(rstVal))) io.read.ready := !wen // so really this could be a 6T RAM io.write.ready := !rst } File Consts.scala: // See LICENSE.Berkeley for license details. 
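// Quick reference for the memory-command predicates defined below. These values follow directly
// from the M_* encodings and predicate definitions in MemoryOpConstants; they are listed only as
// an illustrative summary, not as additional definitions:
//   isRead(M_XRD)        -> true  (plain integer load)
//   isWrite(M_XA_ADD)    -> true, and isAMOArithmetic(M_XA_ADD) -> true (AMOs need write permission)
//   isWriteIntent(M_XLR) -> true  (LR does not itself write, but requires write permission on the line)
//   isPrefetch(M_PFW)    -> true  (prefetch with intent to write)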
package freechips.rocketchip.rocket.constants import chisel3._ import chisel3.util._ import freechips.rocketchip.util._ trait ScalarOpConstants { val SZ_BR = 3 def BR_X = BitPat("b???") def BR_EQ = 0.U(3.W) def BR_NE = 1.U(3.W) def BR_J = 2.U(3.W) def BR_N = 3.U(3.W) def BR_LT = 4.U(3.W) def BR_GE = 5.U(3.W) def BR_LTU = 6.U(3.W) def BR_GEU = 7.U(3.W) def A1_X = BitPat("b??") def A1_ZERO = 0.U(2.W) def A1_RS1 = 1.U(2.W) def A1_PC = 2.U(2.W) def A1_RS1SHL = 3.U(2.W) def IMM_X = BitPat("b???") def IMM_S = 0.U(3.W) def IMM_SB = 1.U(3.W) def IMM_U = 2.U(3.W) def IMM_UJ = 3.U(3.W) def IMM_I = 4.U(3.W) def IMM_Z = 5.U(3.W) def A2_X = BitPat("b???") def A2_ZERO = 0.U(3.W) def A2_SIZE = 1.U(3.W) def A2_RS2 = 2.U(3.W) def A2_IMM = 3.U(3.W) def A2_RS2OH = 4.U(3.W) def A2_IMMOH = 5.U(3.W) def X = BitPat("b?") def N = BitPat("b0") def Y = BitPat("b1") val SZ_DW = 1 def DW_X = X def DW_32 = false.B def DW_64 = true.B def DW_XPR = DW_64 } trait MemoryOpConstants { val NUM_XA_OPS = 9 val M_SZ = 5 def M_X = BitPat("b?????"); def M_XRD = "b00000".U; // int load def M_XWR = "b00001".U; // int store def M_PFR = "b00010".U; // prefetch with intent to read def M_PFW = "b00011".U; // prefetch with intent to write def M_XA_SWAP = "b00100".U def M_FLUSH_ALL = "b00101".U // flush all lines def M_XLR = "b00110".U def M_XSC = "b00111".U def M_XA_ADD = "b01000".U def M_XA_XOR = "b01001".U def M_XA_OR = "b01010".U def M_XA_AND = "b01011".U def M_XA_MIN = "b01100".U def M_XA_MAX = "b01101".U def M_XA_MINU = "b01110".U def M_XA_MAXU = "b01111".U def M_FLUSH = "b10000".U // write back dirty data and cede R/W permissions def M_PWR = "b10001".U // partial (masked) store def M_PRODUCE = "b10010".U // write back dirty data and cede W permissions def M_CLEAN = "b10011".U // write back dirty data and retain R/W permissions def M_SFENCE = "b10100".U // SFENCE.VMA def M_HFENCEV = "b10101".U // HFENCE.VVMA def M_HFENCEG = "b10110".U // HFENCE.GVMA def M_WOK = "b10111".U // check write permissions but don't perform a write def M_HLVX = "b10000".U // HLVX instruction def isAMOLogical(cmd: UInt) = cmd.isOneOf(M_XA_SWAP, M_XA_XOR, M_XA_OR, M_XA_AND) def isAMOArithmetic(cmd: UInt) = cmd.isOneOf(M_XA_ADD, M_XA_MIN, M_XA_MAX, M_XA_MINU, M_XA_MAXU) def isAMO(cmd: UInt) = isAMOLogical(cmd) || isAMOArithmetic(cmd) def isPrefetch(cmd: UInt) = cmd === M_PFR || cmd === M_PFW def isRead(cmd: UInt) = cmd.isOneOf(M_XRD, M_HLVX, M_XLR, M_XSC) || isAMO(cmd) def isWrite(cmd: UInt) = cmd === M_XWR || cmd === M_PWR || cmd === M_XSC || isAMO(cmd) def isWriteIntent(cmd: UInt) = isWrite(cmd) || cmd === M_PFW || cmd === M_XLR } File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. 
*/ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. 
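    // Each still-unconnected Dangle contributes one (name, data, flipped) element to the record
    // below, so the generated port list mirrors exactly the dangles that could not be resolved
    // inside this module.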
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. */ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File TLB.scala: // See LICENSE.SiFive for license details. // See LICENSE.Berkeley for license details. package freechips.rocketchip.rocket import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import freechips.rocketchip.devices.debug.DebugModuleKey import freechips.rocketchip.diplomacy.RegionType import freechips.rocketchip.subsystem.CacheBlockBytes import freechips.rocketchip.tile.{CoreModule, CoreBundle} import freechips.rocketchip.tilelink._ import freechips.rocketchip.util.{OptimizationBarrier, SetAssocLRU, PseudoLRU, PopCountAtLeast, property} import freechips.rocketchip.util.BooleanToAugmentedBoolean import freechips.rocketchip.util.IntToAugmentedInt import freechips.rocketchip.util.UIntToAugmentedUInt import freechips.rocketchip.util.UIntIsOneOf import freechips.rocketchip.util.SeqToAugmentedSeq import freechips.rocketchip.util.SeqBoolBitwiseOps case object ASIdBits extends Field[Int](0) case object VMIdBits extends Field[Int](0) /** =SFENCE= * rs1 rs2 * {{{ * 0 0 -> flush All * 0 1 -> flush by ASID * 1 1 -> flush by ADDR * 1 0 -> flush by ADDR and ASID * }}} * {{{ * If rs1=x0 and rs2=x0, the fence orders all reads and writes made to any level of the page tables, for all address spaces. 
* If rs1=x0 and rs2!=x0, the fence orders all reads and writes made to any level of the page tables, but only for the address space identified by integer register rs2. Accesses to global mappings (see Section 4.3.1) are not ordered. * If rs1!=x0 and rs2=x0, the fence orders only reads and writes made to the leaf page table entry corresponding to the virtual address in rs1, for all address spaces. * If rs1!=x0 and rs2!=x0, the fence orders only reads and writes made to the leaf page table entry corresponding to the virtual address in rs1, for the address space identified by integer register rs2. Accesses to global mappings are not ordered. * }}} */ class SFenceReq(implicit p: Parameters) extends CoreBundle()(p) { val rs1 = Bool() val rs2 = Bool() val addr = UInt(vaddrBits.W) val asid = UInt((asIdBits max 1).W) // TODO zero-width val hv = Bool() val hg = Bool() } class TLBReq(lgMaxSize: Int)(implicit p: Parameters) extends CoreBundle()(p) { /** request address from CPU. */ val vaddr = UInt(vaddrBitsExtended.W) /** don't lookup TLB, bypass vaddr as paddr */ val passthrough = Bool() /** granularity */ val size = UInt(log2Ceil(lgMaxSize + 1).W) /** memory command. */ val cmd = Bits(M_SZ.W) val prv = UInt(PRV.SZ.W) /** virtualization mode */ val v = Bool() } class TLBExceptions extends Bundle { val ld = Bool() val st = Bool() val inst = Bool() } class TLBResp(lgMaxSize: Int = 3)(implicit p: Parameters) extends CoreBundle()(p) { // lookup responses val miss = Bool() /** physical address */ val paddr = UInt(paddrBits.W) val gpa = UInt(vaddrBitsExtended.W) val gpa_is_pte = Bool() /** page fault exception */ val pf = new TLBExceptions /** guest page fault exception */ val gf = new TLBExceptions /** access exception */ val ae = new TLBExceptions /** misaligned access exception */ val ma = new TLBExceptions /** if this address is cacheable */ val cacheable = Bool() /** if caches must allocate this address */ val must_alloc = Bool() /** if this address is prefetchable for caches*/ val prefetchable = Bool() /** size/cmd of request that generated this response*/ val size = UInt(log2Ceil(lgMaxSize + 1).W) val cmd = UInt(M_SZ.W) } class TLBEntryData(implicit p: Parameters) extends CoreBundle()(p) { val ppn = UInt(ppnBits.W) /** pte.u user */ val u = Bool() /** pte.g global */ val g = Bool() /** access exception. * D$ -> PTW -> TLB AE * Alignment failed. 
*/ val ae_ptw = Bool() val ae_final = Bool() val ae_stage2 = Bool() /** page fault */ val pf = Bool() /** guest page fault */ val gf = Bool() /** supervisor write */ val sw = Bool() /** supervisor execute */ val sx = Bool() /** supervisor read */ val sr = Bool() /** hypervisor write */ val hw = Bool() /** hypervisor excute */ val hx = Bool() /** hypervisor read */ val hr = Bool() /** prot_w */ val pw = Bool() /** prot_x */ val px = Bool() /** prot_r */ val pr = Bool() /** PutPartial */ val ppp = Bool() /** AMO logical */ val pal = Bool() /** AMO arithmetic */ val paa = Bool() /** get/put effects */ val eff = Bool() /** cacheable */ val c = Bool() /** fragmented_superpage support */ val fragmented_superpage = Bool() } /** basic cell for TLB data */ class TLBEntry(val nSectors: Int, val superpage: Boolean, val superpageOnly: Boolean)(implicit p: Parameters) extends CoreBundle()(p) { require(nSectors == 1 || !superpage) require(!superpageOnly || superpage) val level = UInt(log2Ceil(pgLevels).W) /** use vpn as tag */ val tag_vpn = UInt(vpnBits.W) /** tag in vitualization mode */ val tag_v = Bool() /** entry data */ val data = Vec(nSectors, UInt(new TLBEntryData().getWidth.W)) /** valid bit */ val valid = Vec(nSectors, Bool()) /** returns all entry data in this entry */ def entry_data = data.map(_.asTypeOf(new TLBEntryData)) /** returns the index of sector */ private def sectorIdx(vpn: UInt) = vpn.extract(nSectors.log2-1, 0) /** returns the entry data matched with this vpn*/ def getData(vpn: UInt) = OptimizationBarrier(data(sectorIdx(vpn)).asTypeOf(new TLBEntryData)) /** returns whether a sector hits */ def sectorHit(vpn: UInt, virtual: Bool) = valid.orR && sectorTagMatch(vpn, virtual) /** returns whether tag matches vpn */ def sectorTagMatch(vpn: UInt, virtual: Bool) = (((tag_vpn ^ vpn) >> nSectors.log2) === 0.U) && (tag_v === virtual) /** returns hit signal */ def hit(vpn: UInt, virtual: Bool): Bool = { if (superpage && usingVM) { var tagMatch = valid.head && (tag_v === virtual) for (j <- 0 until pgLevels) { val base = (pgLevels - 1 - j) * pgLevelBits val n = pgLevelBits + (if (j == 0) hypervisorExtraAddrBits else 0) val ignore = level < j.U || (superpageOnly && j == pgLevels - 1).B tagMatch = tagMatch && (ignore || (tag_vpn ^ vpn)(base + n - 1, base) === 0.U) } tagMatch } else { val idx = sectorIdx(vpn) valid(idx) && sectorTagMatch(vpn, virtual) } } /** returns the ppn of the input TLBEntryData */ def ppn(vpn: UInt, data: TLBEntryData) = { val supervisorVPNBits = pgLevels * pgLevelBits if (superpage && usingVM) { var res = data.ppn >> pgLevelBits*(pgLevels - 1) for (j <- 1 until pgLevels) { val ignore = level < j.U || (superpageOnly && j == pgLevels - 1).B res = Cat(res, (Mux(ignore, vpn, 0.U) | data.ppn)(supervisorVPNBits - j*pgLevelBits - 1, supervisorVPNBits - (j + 1)*pgLevelBits)) } res } else { data.ppn } } /** does the refill * * find the target entry with vpn tag * and replace the target entry with the input entry data */ def insert(vpn: UInt, virtual: Bool, level: UInt, entry: TLBEntryData): Unit = { this.tag_vpn := vpn this.tag_v := virtual this.level := level.extract(log2Ceil(pgLevels - superpageOnly.toInt)-1, 0) val idx = sectorIdx(vpn) valid(idx) := true.B data(idx) := entry.asUInt } def invalidate(): Unit = { valid.foreach(_ := false.B) } def invalidate(virtual: Bool): Unit = { for ((v, e) <- valid zip entry_data) when (tag_v === virtual) { v := false.B } } def invalidateVPN(vpn: UInt, virtual: Bool): Unit = { if (superpage) { when (hit(vpn, virtual)) { invalidate() } } else { 
when (sectorTagMatch(vpn, virtual)) { for (((v, e), i) <- (valid zip entry_data).zipWithIndex) when (tag_v === virtual && i.U === sectorIdx(vpn)) { v := false.B } } } // For fragmented superpage mappings, we assume the worst (largest) // case, and zap entries whose most-significant VPNs match when (((tag_vpn ^ vpn) >> (pgLevelBits * (pgLevels - 1))) === 0.U) { for ((v, e) <- valid zip entry_data) when (tag_v === virtual && e.fragmented_superpage) { v := false.B } } } def invalidateNonGlobal(virtual: Bool): Unit = { for ((v, e) <- valid zip entry_data) when (tag_v === virtual && !e.g) { v := false.B } } } /** TLB config * * @param nSets the number of sets of PTE, follow [[ICacheParams.nSets]] * @param nWays the total number of wayss of PTE, follow [[ICacheParams.nWays]] * @param nSectors the number of ways in a single PTE TLBEntry * @param nSuperpageEntries the number of SuperpageEntries */ case class TLBConfig( nSets: Int, nWays: Int, nSectors: Int = 4, nSuperpageEntries: Int = 4) /** =Overview= * [[TLB]] is a TLB template which contains PMA logic and PMP checker. * * TLB caches PTE and accelerates the address translation process. * When tlb miss happens, ask PTW(L2TLB) for Page Table Walk. * Perform PMP and PMA check during the translation and throw exception if there were any. * * ==Cache Structure== * - Sectored Entry (PTE) * - set-associative or direct-mapped * - nsets = [[TLBConfig.nSets]] * - nways = [[TLBConfig.nWays]] / [[TLBConfig.nSectors]] * - PTEEntry( sectors = [[TLBConfig.nSectors]] ) * - LRU(if set-associative) * * - Superpage Entry(superpage PTE) * - fully associative * - nsets = [[TLBConfig.nSuperpageEntries]] * - PTEEntry(sectors = 1) * - PseudoLRU * * - Special Entry(PTE across PMP) * - nsets = 1 * - PTEEntry(sectors = 1) * * ==Address structure== * {{{ * |vaddr | * |ppn/vpn | pgIndex | * | | | * | |nSets |nSector | |}}} * * ==State Machine== * {{{ * s_ready: ready to accept request from CPU. * s_request: when L1TLB(this) miss, send request to PTW(L2TLB), . * s_wait: wait for PTW to refill L1TLB. * s_wait_invalidate: L1TLB is waiting for respond from PTW, but L1TLB will invalidate respond from PTW.}}} * * ==PMP== * pmp check * - special_entry: always check * - other entry: check on refill * * ==Note== * PMA consume diplomacy parameter generate physical memory address checking logic * * Boom use Rocket ITLB, and its own DTLB. * * Accelerators:{{{ * sha3: DTLB * gemmini: DTLB * hwacha: DTLB*2+ITLB}}} * @param instruction true for ITLB, false for DTLB * @param lgMaxSize @todo seems granularity * @param cfg [[TLBConfig]] * @param edge collect SoC metadata. 
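 *
 * ==Example==
 * A hypothetical instantiation sketch; the parameter values and the s1_valid / s1_vaddr / ptwPort
 * names are placeholders, not taken from any particular configuration:
 * {{{
 * val dtlb = Module(new TLB(instruction = false, lgMaxSize = 3,
 *   TLBConfig(nSets = 1, nWays = 32, nSectors = 4, nSuperpageEntries = 4)))
 * dtlb.io.req.valid := s1_valid
 * dtlb.io.req.bits.vaddr := s1_vaddr
 * dtlb.io.req.bits.cmd := M_XRD
 * dtlb.io.req.bits.size := 3.U
 * dtlb.io.req.bits.passthrough := false.B
 * dtlb.io.ptw <> ptwPort // a TLBPTWIO supplied by the page-table walker
 * // (prv, v, sfence and kill must also be driven in a real design)
 * }}}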
*/ class TLB(instruction: Boolean, lgMaxSize: Int, cfg: TLBConfig)(implicit edge: TLEdgeOut, p: Parameters) extends CoreModule()(p) { override def desiredName = if (instruction) "ITLB" else "DTLB" val io = IO(new Bundle { /** request from Core */ val req = Flipped(Decoupled(new TLBReq(lgMaxSize))) /** response to Core */ val resp = Output(new TLBResp(lgMaxSize)) /** SFence Input */ val sfence = Flipped(Valid(new SFenceReq)) /** IO to PTW */ val ptw = new TLBPTWIO /** suppress a TLB refill, one cycle after a miss */ val kill = Input(Bool()) }) io.ptw.customCSRs := DontCare val pageGranularityPMPs = pmpGranularity >= (1 << pgIdxBits) val vpn = io.req.bits.vaddr(vaddrBits-1, pgIdxBits) /** index for sectored_Entry */ val memIdx = vpn.extract(cfg.nSectors.log2 + cfg.nSets.log2 - 1, cfg.nSectors.log2) /** TLB Entry */ val sectored_entries = Reg(Vec(cfg.nSets, Vec(cfg.nWays / cfg.nSectors, new TLBEntry(cfg.nSectors, false, false)))) /** Superpage Entry */ val superpage_entries = Reg(Vec(cfg.nSuperpageEntries, new TLBEntry(1, true, true))) /** Special Entry * * If PMP granularity is less than page size, thus need additional "special" entry manage PMP. */ val special_entry = (!pageGranularityPMPs).option(Reg(new TLBEntry(1, true, false))) def ordinary_entries = sectored_entries(memIdx) ++ superpage_entries def all_entries = ordinary_entries ++ special_entry def all_real_entries = sectored_entries.flatten ++ superpage_entries ++ special_entry val s_ready :: s_request :: s_wait :: s_wait_invalidate :: Nil = Enum(4) val state = RegInit(s_ready) // use vpn as refill_tag val r_refill_tag = Reg(UInt(vpnBits.W)) val r_superpage_repl_addr = Reg(UInt(log2Ceil(superpage_entries.size).W)) val r_sectored_repl_addr = Reg(UInt(log2Ceil(sectored_entries.head.size).W)) val r_sectored_hit = Reg(Valid(UInt(log2Ceil(sectored_entries.head.size).W))) val r_superpage_hit = Reg(Valid(UInt(log2Ceil(superpage_entries.size).W))) val r_vstage1_en = Reg(Bool()) val r_stage2_en = Reg(Bool()) val r_need_gpa = Reg(Bool()) val r_gpa_valid = Reg(Bool()) val r_gpa = Reg(UInt(vaddrBits.W)) val r_gpa_vpn = Reg(UInt(vpnBits.W)) val r_gpa_is_pte = Reg(Bool()) /** privilege mode */ val priv = io.req.bits.prv val priv_v = usingHypervisor.B && io.req.bits.v val priv_s = priv(0) // user mode and supervisor mode val priv_uses_vm = priv <= PRV.S.U val satp = Mux(priv_v, io.ptw.vsatp, io.ptw.ptbr) val stage1_en = usingVM.B && satp.mode(satp.mode.getWidth-1) /** VS-stage translation enable */ val vstage1_en = usingHypervisor.B && priv_v && io.ptw.vsatp.mode(io.ptw.vsatp.mode.getWidth-1) /** G-stage translation enable */ val stage2_en = usingHypervisor.B && priv_v && io.ptw.hgatp.mode(io.ptw.hgatp.mode.getWidth-1) /** Enable Virtual Memory when: * 1. statically configured * 1. satp highest bits enabled * i. RV32: * - 0 -> Bare * - 1 -> SV32 * i. RV64: * - 0000 -> Bare * - 1000 -> SV39 * - 1001 -> SV48 * - 1010 -> SV57 * - 1011 -> SV64 * 1. In virtualization mode, vsatp highest bits enabled * 1. priv mode in U and S. * 1. in H & M mode, disable VM. * 1. no passthrough(micro-arch defined.) 
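   * For example (RV64, outside virtualization): with satp.mode = 1000 (SV39), priv = S, and
   * passthrough = 0, stage1_en and therefore vm_enabled are asserted; with satp.mode = 0000 (Bare),
   * stage1_en is deasserted and the lookup falls back to treating the VPN as a physical page number.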
* * @see RV-priv spec 4.1.11 Supervisor Address Translation and Protection (satp) Register * @see RV-priv spec 8.2.18 Virtual Supervisor Address Translation and Protection Register (vsatp) */ val vm_enabled = (stage1_en || stage2_en) && priv_uses_vm && !io.req.bits.passthrough // flush guest entries on vsatp.MODE Bare <-> SvXX transitions val v_entries_use_stage1 = RegInit(false.B) val vsatp_mode_mismatch = priv_v && (vstage1_en =/= v_entries_use_stage1) && !io.req.bits.passthrough // share a single physical memory attribute checker (unshare if critical path) val refill_ppn = io.ptw.resp.bits.pte.ppn(ppnBits-1, 0) /** refill signal */ val do_refill = usingVM.B && io.ptw.resp.valid /** sfence invalidate refill */ val invalidate_refill = state.isOneOf(s_request /* don't care */, s_wait_invalidate) || io.sfence.valid // PMP val mpu_ppn = Mux(do_refill, refill_ppn, Mux(vm_enabled && special_entry.nonEmpty.B, special_entry.map(e => e.ppn(vpn, e.getData(vpn))).getOrElse(0.U), io.req.bits.vaddr >> pgIdxBits)) val mpu_physaddr = Cat(mpu_ppn, io.req.bits.vaddr(pgIdxBits-1, 0)) val mpu_priv = Mux[UInt](usingVM.B && (do_refill || io.req.bits.passthrough /* PTW */), PRV.S.U, Cat(io.ptw.status.debug, priv)) val pmp = Module(new PMPChecker(lgMaxSize)) pmp.io.addr := mpu_physaddr pmp.io.size := io.req.bits.size pmp.io.pmp := (io.ptw.pmp: Seq[PMP]) pmp.io.prv := mpu_priv val pma = Module(new PMAChecker(edge.manager)(p)) pma.io.paddr := mpu_physaddr // todo: using DataScratchpad doesn't support cacheable. val cacheable = pma.io.resp.cacheable && (instruction || !usingDataScratchpad).B val homogeneous = TLBPageLookup(edge.manager.managers, xLen, p(CacheBlockBytes), BigInt(1) << pgIdxBits, 1 << lgMaxSize)(mpu_physaddr).homogeneous // In M mode, if access DM address(debug module program buffer) val deny_access_to_debug = mpu_priv <= PRV.M.U && p(DebugModuleKey).map(dmp => dmp.address.contains(mpu_physaddr)).getOrElse(false.B) val prot_r = pma.io.resp.r && !deny_access_to_debug && pmp.io.r val prot_w = pma.io.resp.w && !deny_access_to_debug && pmp.io.w val prot_pp = pma.io.resp.pp val prot_al = pma.io.resp.al val prot_aa = pma.io.resp.aa val prot_x = pma.io.resp.x && !deny_access_to_debug && pmp.io.x val prot_eff = pma.io.resp.eff // hit check val sector_hits = sectored_entries(memIdx).map(_.sectorHit(vpn, priv_v)) val superpage_hits = superpage_entries.map(_.hit(vpn, priv_v)) val hitsVec = all_entries.map(vm_enabled && _.hit(vpn, priv_v)) val real_hits = hitsVec.asUInt val hits = Cat(!vm_enabled, real_hits) // use ptw response to refill // permission bit arrays when (do_refill) { val pte = io.ptw.resp.bits.pte val refill_v = r_vstage1_en || r_stage2_en val newEntry = Wire(new TLBEntryData) newEntry.ppn := pte.ppn newEntry.c := cacheable newEntry.u := pte.u newEntry.g := pte.g && pte.v newEntry.ae_ptw := io.ptw.resp.bits.ae_ptw newEntry.ae_final := io.ptw.resp.bits.ae_final newEntry.ae_stage2 := io.ptw.resp.bits.ae_final && io.ptw.resp.bits.gpa_is_pte && r_stage2_en newEntry.pf := io.ptw.resp.bits.pf newEntry.gf := io.ptw.resp.bits.gf newEntry.hr := io.ptw.resp.bits.hr newEntry.hw := io.ptw.resp.bits.hw newEntry.hx := io.ptw.resp.bits.hx newEntry.sr := pte.sr() newEntry.sw := pte.sw() newEntry.sx := pte.sx() newEntry.pr := prot_r newEntry.pw := prot_w newEntry.px := prot_x newEntry.ppp := prot_pp newEntry.pal := prot_al newEntry.paa := prot_aa newEntry.eff := prot_eff newEntry.fragmented_superpage := io.ptw.resp.bits.fragmented_superpage // refill special_entry when (special_entry.nonEmpty.B && 
!io.ptw.resp.bits.homogeneous) { special_entry.foreach(_.insert(r_refill_tag, refill_v, io.ptw.resp.bits.level, newEntry)) }.elsewhen (io.ptw.resp.bits.level < (pgLevels-1).U) { val waddr = Mux(r_superpage_hit.valid && usingHypervisor.B, r_superpage_hit.bits, r_superpage_repl_addr) for ((e, i) <- superpage_entries.zipWithIndex) when (r_superpage_repl_addr === i.U) { e.insert(r_refill_tag, refill_v, io.ptw.resp.bits.level, newEntry) when (invalidate_refill) { e.invalidate() } } // refill sectored_hit }.otherwise { val r_memIdx = r_refill_tag.extract(cfg.nSectors.log2 + cfg.nSets.log2 - 1, cfg.nSectors.log2) val waddr = Mux(r_sectored_hit.valid, r_sectored_hit.bits, r_sectored_repl_addr) for ((e, i) <- sectored_entries(r_memIdx).zipWithIndex) when (waddr === i.U) { when (!r_sectored_hit.valid) { e.invalidate() } e.insert(r_refill_tag, refill_v, 0.U, newEntry) when (invalidate_refill) { e.invalidate() } } } r_gpa_valid := io.ptw.resp.bits.gpa.valid r_gpa := io.ptw.resp.bits.gpa.bits r_gpa_is_pte := io.ptw.resp.bits.gpa_is_pte } // get all entries data. val entries = all_entries.map(_.getData(vpn)) val normal_entries = entries.take(ordinary_entries.size) // parallel query PPN from [[all_entries]], if VM not enabled return VPN instead val ppn = Mux1H(hitsVec :+ !vm_enabled, (all_entries zip entries).map{ case (entry, data) => entry.ppn(vpn, data) } :+ vpn(ppnBits-1, 0)) val nPhysicalEntries = 1 + special_entry.size // generally PTW misaligned load exception. val ptw_ae_array = Cat(false.B, entries.map(_.ae_ptw).asUInt) val final_ae_array = Cat(false.B, entries.map(_.ae_final).asUInt) val ptw_pf_array = Cat(false.B, entries.map(_.pf).asUInt) val ptw_gf_array = Cat(false.B, entries.map(_.gf).asUInt) val sum = Mux(priv_v, io.ptw.gstatus.sum, io.ptw.status.sum) // if in hypervisor/machine mode, cannot read/write user entries. // if in superviosr/user mode, "If the SUM bit in the sstatus register is set, supervisor mode software may also access pages with U=1.(from spec)" val priv_rw_ok = Mux(!priv_s || sum, entries.map(_.u).asUInt, 0.U) | Mux(priv_s, ~entries.map(_.u).asUInt, 0.U) // if in hypervisor/machine mode, other than user pages, all pages are executable. // if in superviosr/user mode, only user page can execute. val priv_x_ok = Mux(priv_s, ~entries.map(_.u).asUInt, entries.map(_.u).asUInt) val stage1_bypass = Fill(entries.size, usingHypervisor.B) & (Fill(entries.size, !stage1_en) | entries.map(_.ae_stage2).asUInt) val mxr = io.ptw.status.mxr | Mux(priv_v, io.ptw.gstatus.mxr, false.B) // "The vsstatus field MXR, which makes execute-only pages readable, only overrides VS-stage page protection.(from spec)" val r_array = Cat(true.B, (priv_rw_ok & (entries.map(_.sr).asUInt | Mux(mxr, entries.map(_.sx).asUInt, 0.U))) | stage1_bypass) val w_array = Cat(true.B, (priv_rw_ok & entries.map(_.sw).asUInt) | stage1_bypass) val x_array = Cat(true.B, (priv_x_ok & entries.map(_.sx).asUInt) | stage1_bypass) val stage2_bypass = Fill(entries.size, !stage2_en) val hr_array = Cat(true.B, entries.map(_.hr).asUInt | Mux(io.ptw.status.mxr, entries.map(_.hx).asUInt, 0.U) | stage2_bypass) val hw_array = Cat(true.B, entries.map(_.hw).asUInt | stage2_bypass) val hx_array = Cat(true.B, entries.map(_.hx).asUInt | stage2_bypass) // These array is for each TLB entries. 
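  // Layout of the permission/attribute vectors built below (summarizing the surrounding code):
  //   - one bit per ordinary entry (sectored set ++ superpage entries), in normal_entries order;
  //   - the pr/pw/px/eff/c/ppp/paa/pal arrays are widened with nPhysicalEntries high-order bits
  //     (special entry plus the VM-disabled passthrough case) via Fill(nPhysicalEntries, prot_*);
  //   - an exception is reported only where a "bad" bit coincides with a hit, e.g.
  //     io.resp.ae.ld := (ae_ld_array & hits).orR with hits = Cat(!vm_enabled, real_hits).
  // Worked example (hypothetical config with two ordinary entries and no special entry): a read
  // that hits ordinary entry 1 of a readable page gives hits = "b010" and pr_array(1) = 1, so the
  // ~pr_array term contributes nothing at the hit position and, provided the access is also
  // aligned, no access exception is raised.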
// user mode can read: PMA OK, TLB OK, AE OK val pr_array = Cat(Fill(nPhysicalEntries, prot_r), normal_entries.map(_.pr).asUInt) & ~(ptw_ae_array | final_ae_array) // user mode can write: PMA OK, TLB OK, AE OK val pw_array = Cat(Fill(nPhysicalEntries, prot_w), normal_entries.map(_.pw).asUInt) & ~(ptw_ae_array | final_ae_array) // user mode can write: PMA OK, TLB OK, AE OK val px_array = Cat(Fill(nPhysicalEntries, prot_x), normal_entries.map(_.px).asUInt) & ~(ptw_ae_array | final_ae_array) // put effect val eff_array = Cat(Fill(nPhysicalEntries, prot_eff), normal_entries.map(_.eff).asUInt) // cacheable val c_array = Cat(Fill(nPhysicalEntries, cacheable), normal_entries.map(_.c).asUInt) // put partial val ppp_array = Cat(Fill(nPhysicalEntries, prot_pp), normal_entries.map(_.ppp).asUInt) // atomic arithmetic val paa_array = Cat(Fill(nPhysicalEntries, prot_aa), normal_entries.map(_.paa).asUInt) // atomic logic val pal_array = Cat(Fill(nPhysicalEntries, prot_al), normal_entries.map(_.pal).asUInt) val ppp_array_if_cached = ppp_array | c_array val paa_array_if_cached = paa_array | (if(usingAtomicsInCache) c_array else 0.U) val pal_array_if_cached = pal_array | (if(usingAtomicsInCache) c_array else 0.U) val prefetchable_array = Cat((cacheable && homogeneous) << (nPhysicalEntries-1), normal_entries.map(_.c).asUInt) // vaddr misaligned: vaddr[1:0]=b00 val misaligned = (io.req.bits.vaddr & (UIntToOH(io.req.bits.size) - 1.U)).orR def badVA(guestPA: Boolean): Bool = { val additionalPgLevels = (if (guestPA) io.ptw.hgatp else satp).additionalPgLevels val extraBits = if (guestPA) hypervisorExtraAddrBits else 0 val signed = !guestPA val nPgLevelChoices = pgLevels - minPgLevels + 1 val minVAddrBits = pgIdxBits + minPgLevels * pgLevelBits + extraBits (for (i <- 0 until nPgLevelChoices) yield { val mask = ((BigInt(1) << vaddrBitsExtended) - (BigInt(1) << (minVAddrBits + i * pgLevelBits - signed.toInt))).U val maskedVAddr = io.req.bits.vaddr & mask additionalPgLevels === i.U && !(maskedVAddr === 0.U || signed.B && maskedVAddr === mask) }).orR } val bad_gpa = if (!usingHypervisor) false.B else vm_enabled && !stage1_en && badVA(true) val bad_va = if (!usingVM || (minPgLevels == pgLevels && vaddrBits == vaddrBitsExtended)) false.B else vm_enabled && stage1_en && badVA(false) val cmd_lrsc = usingAtomics.B && io.req.bits.cmd.isOneOf(M_XLR, M_XSC) val cmd_amo_logical = usingAtomics.B && isAMOLogical(io.req.bits.cmd) val cmd_amo_arithmetic = usingAtomics.B && isAMOArithmetic(io.req.bits.cmd) val cmd_put_partial = io.req.bits.cmd === M_PWR val cmd_read = isRead(io.req.bits.cmd) val cmd_readx = usingHypervisor.B && io.req.bits.cmd === M_HLVX val cmd_write = isWrite(io.req.bits.cmd) val cmd_write_perms = cmd_write || io.req.bits.cmd.isOneOf(M_FLUSH_ALL, M_WOK) // not a write, but needs write permissions val lrscAllowed = Mux((usingDataScratchpad || usingAtomicsOnlyForIO).B, 0.U, c_array) val ae_array = Mux(misaligned, eff_array, 0.U) | Mux(cmd_lrsc, ~lrscAllowed, 0.U) // access exception needs SoC information from PMA val ae_ld_array = Mux(cmd_read, ae_array | ~pr_array, 0.U) val ae_st_array = Mux(cmd_write_perms, ae_array | ~pw_array, 0.U) | Mux(cmd_put_partial, ~ppp_array_if_cached, 0.U) | Mux(cmd_amo_logical, ~pal_array_if_cached, 0.U) | Mux(cmd_amo_arithmetic, ~paa_array_if_cached, 0.U) val must_alloc_array = Mux(cmd_put_partial, ~ppp_array, 0.U) | Mux(cmd_amo_logical, ~pal_array, 0.U) | Mux(cmd_amo_arithmetic, ~paa_array, 0.U) | Mux(cmd_lrsc, ~0.U(pal_array.getWidth.W), 0.U) val pf_ld_array = Mux(cmd_read, 
((~Mux(cmd_readx, x_array, r_array) & ~ptw_ae_array) | ptw_pf_array) & ~ptw_gf_array, 0.U) val pf_st_array = Mux(cmd_write_perms, ((~w_array & ~ptw_ae_array) | ptw_pf_array) & ~ptw_gf_array, 0.U) val pf_inst_array = ((~x_array & ~ptw_ae_array) | ptw_pf_array) & ~ptw_gf_array val gf_ld_array = Mux(priv_v && cmd_read, (~Mux(cmd_readx, hx_array, hr_array) | ptw_gf_array) & ~ptw_ae_array, 0.U) val gf_st_array = Mux(priv_v && cmd_write_perms, (~hw_array | ptw_gf_array) & ~ptw_ae_array, 0.U) val gf_inst_array = Mux(priv_v, (~hx_array | ptw_gf_array) & ~ptw_ae_array, 0.U) val gpa_hits = { val need_gpa_mask = if (instruction) gf_inst_array else gf_ld_array | gf_st_array val hit_mask = Fill(ordinary_entries.size, r_gpa_valid && r_gpa_vpn === vpn) | Fill(all_entries.size, !vstage1_en) hit_mask | ~need_gpa_mask(all_entries.size-1, 0) } val tlb_hit_if_not_gpa_miss = real_hits.orR val tlb_hit = (real_hits & gpa_hits).orR // leads to s_request val tlb_miss = vm_enabled && !vsatp_mode_mismatch && !bad_va && !tlb_hit val sectored_plru = new SetAssocLRU(cfg.nSets, sectored_entries.head.size, "plru") val superpage_plru = new PseudoLRU(superpage_entries.size) when (io.req.valid && vm_enabled) { // replace when (sector_hits.orR) { sectored_plru.access(memIdx, OHToUInt(sector_hits)) } when (superpage_hits.orR) { superpage_plru.access(OHToUInt(superpage_hits)) } } // Superpages create the possibility that two entries in the TLB may match. // This corresponds to a software bug, but we can't return complete garbage; // we must return either the old translation or the new translation. This // isn't compatible with the Mux1H approach. So, flush the TLB and report // a miss on duplicate entries. val multipleHits = PopCountAtLeast(real_hits, 2) // only pull up req.ready when this is s_ready state. 
  io.req.ready := state === s_ready
  // page fault
  io.resp.pf.ld := (bad_va && cmd_read) || (pf_ld_array & hits).orR
  io.resp.pf.st := (bad_va && cmd_write_perms) || (pf_st_array & hits).orR
  io.resp.pf.inst := bad_va || (pf_inst_array & hits).orR
  // guest page fault
  io.resp.gf.ld := (bad_gpa && cmd_read) || (gf_ld_array & hits).orR
  io.resp.gf.st := (bad_gpa && cmd_write_perms) || (gf_st_array & hits).orR
  io.resp.gf.inst := bad_gpa || (gf_inst_array & hits).orR
  // access exception
  io.resp.ae.ld := (ae_ld_array & hits).orR
  io.resp.ae.st := (ae_st_array & hits).orR
  io.resp.ae.inst := (~px_array & hits).orR
  // misaligned
  io.resp.ma.ld := misaligned && cmd_read
  io.resp.ma.st := misaligned && cmd_write
  io.resp.ma.inst := false.B // this is up to the pipeline to figure out
  io.resp.cacheable := (c_array & hits).orR
  io.resp.must_alloc := (must_alloc_array & hits).orR
  io.resp.prefetchable := (prefetchable_array & hits).orR && edge.manager.managers.forall(m => !m.supportsAcquireB || m.supportsHint).B
  io.resp.miss := do_refill || vsatp_mode_mismatch || tlb_miss || multipleHits
  io.resp.paddr := Cat(ppn, io.req.bits.vaddr(pgIdxBits-1, 0))
  io.resp.size := io.req.bits.size
  io.resp.cmd := io.req.bits.cmd
  io.resp.gpa_is_pte := vstage1_en && r_gpa_is_pte
  io.resp.gpa := {
    val page = Mux(!vstage1_en, Cat(bad_gpa, vpn), r_gpa >> pgIdxBits)
    val offset = Mux(io.resp.gpa_is_pte, r_gpa(pgIdxBits-1, 0), io.req.bits.vaddr(pgIdxBits-1, 0))
    Cat(page, offset)
  }

  io.ptw.req.valid := state === s_request
  io.ptw.req.bits.valid := !io.kill
  io.ptw.req.bits.bits.addr := r_refill_tag
  io.ptw.req.bits.bits.vstage1 := r_vstage1_en
  io.ptw.req.bits.bits.stage2 := r_stage2_en
  io.ptw.req.bits.bits.need_gpa := r_need_gpa

  if (usingVM) {
    when(io.ptw.req.fire && io.ptw.req.bits.valid) {
      r_gpa_valid := false.B
      r_gpa_vpn := r_refill_tag
    }

    val sfence = io.sfence.valid
    // this is [[s_ready]]:
    // handle miss/hit at the first cycle;
    // on a miss, send a request to the PTW (L2 TLB).
    when (io.req.fire && tlb_miss) {
      state := s_request
      r_refill_tag := vpn
      r_need_gpa := tlb_hit_if_not_gpa_miss
      r_vstage1_en := vstage1_en
      r_stage2_en := stage2_en
      r_superpage_repl_addr := replacementEntry(superpage_entries, superpage_plru.way)
      r_sectored_repl_addr := replacementEntry(sectored_entries(memIdx), sectored_plru.way(memIdx))
      r_sectored_hit.valid := sector_hits.orR
      r_sectored_hit.bits := OHToUInt(sector_hits)
      r_superpage_hit.valid := superpage_hits.orR
      r_superpage_hit.bits := OHToUInt(superpage_hits)
    }
    // Handle SFENCE.VMA while the request is being sent to the PTW.
    // SFENCE.VMA   io.ptw.req.ready   kill
    //  ?            ?                  1    -> s_ready
    //  0            0                  0    (stay in s_request)
    //  0            1                  0    -> s_wait
    //  1            1                  0    -> s_wait_invalidate
    //  1            0                  0    -> s_ready
    when (state === s_request) {
      // SFENCE.VMA will kill TLB entries based on rs1 and rs2. It takes 1 cycle.
      when (sfence) { state := s_ready }
      // logically this is io.ptw.req.fire: io.ptw.req.valid is already high whenever state === s_request
      // fire -> s_wait
      when (io.ptw.req.ready) { state := Mux(sfence, s_wait_invalidate, s_wait) }
      // the CPU may kill the request (frontend.s2_redirect)
      when (io.kill) { state := s_ready }
    }
    // an sfence during refill results in an invalidate
    when (state === s_wait && sfence) { state := s_wait_invalidate }
    // once the PTW response arrives, go back to s_ready.
    when (io.ptw.resp.valid) { state := s_ready }

    // SFENCE processing logic.
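    // Here rs1 and rs2 are flags indicating whether the SFENCE.VMA supplied a virtual
    // address (rs1) and/or an ASID (rs2); the four combinations correspond to the
    // SFENCE_ALL / SFENCE_ASID / SFENCE_LINE / SFENCE_LINE_ASID coverage points below.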
    when (sfence) {
      assert(!io.sfence.bits.rs1 || (io.sfence.bits.addr >> pgIdxBits) === vpn)
      for (e <- all_real_entries) {
        val hv = usingHypervisor.B && io.sfence.bits.hv
        val hg = usingHypervisor.B && io.sfence.bits.hg
        when (!hg && io.sfence.bits.rs1) { e.invalidateVPN(vpn, hv) }
        .elsewhen (!hg && io.sfence.bits.rs2) { e.invalidateNonGlobal(hv) }
        .otherwise { e.invalidate(hv || hg) }
      }
    }
    when(io.req.fire && vsatp_mode_mismatch) {
      all_real_entries.foreach(_.invalidate(true.B))
      v_entries_use_stage1 := vstage1_en
    }
    when (multipleHits || reset.asBool) {
      all_real_entries.foreach(_.invalidate())
    }

    ccover(io.ptw.req.fire, "MISS", "TLB miss")
    ccover(io.ptw.req.valid && !io.ptw.req.ready, "PTW_STALL", "TLB miss, but PTW busy")
    ccover(state === s_wait_invalidate, "SFENCE_DURING_REFILL", "flush TLB during TLB refill")
    ccover(sfence && !io.sfence.bits.rs1 && !io.sfence.bits.rs2, "SFENCE_ALL", "flush TLB")
    ccover(sfence && !io.sfence.bits.rs1 && io.sfence.bits.rs2, "SFENCE_ASID", "flush TLB ASID")
    ccover(sfence && io.sfence.bits.rs1 && !io.sfence.bits.rs2, "SFENCE_LINE", "flush TLB line")
    ccover(sfence && io.sfence.bits.rs1 && io.sfence.bits.rs2, "SFENCE_LINE_ASID", "flush TLB line/ASID")
    ccover(multipleHits, "MULTIPLE_HITS", "Two matching translations in TLB")
  }

  def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
    property.cover(cond, s"${if (instruction) "I" else "D"}TLB_$label", "MemorySystem;;" + desc)

  /** Decides which entry to replace
    *
    * If there is an invalid entry, pick it with a priority encoder;
    * otherwise, replace the alt entry
    *
    * @return index of the TLBEntry to replace
    */
  def replacementEntry(set: Seq[TLBEntry], alt: UInt) = {
    val valids = set.map(_.valid.orR).asUInt
    Mux(valids.andR, alt, PriorityEncoder(~valids))
  }
}
File DCache.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.rocket import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import freechips.rocketchip.amba.AMBAProt import freechips.rocketchip.diplomacy.{BufferParams} import freechips.rocketchip.prci.{ClockCrossingType, RationalCrossing, SynchronousCrossing, AsynchronousCrossing, CreditedCrossing} import freechips.rocketchip.tile.{CoreBundle, LookupByHartId} import freechips.rocketchip.tilelink.{TLFIFOFixer,ClientMetadata, TLBundleA, TLAtomics, TLBundleB, TLPermissions} import freechips.rocketchip.tilelink.TLMessages.{AccessAck, HintAck, AccessAckData, Grant, GrantData, ReleaseAck} import freechips.rocketchip.util.{CanHaveErrors, ClockGate, IdentityCode, ReplacementPolicy, DescribedSRAM, property} import freechips.rocketchip.util.BooleanToAugmentedBoolean import freechips.rocketchip.util.UIntToAugmentedUInt import freechips.rocketchip.util.UIntIsOneOf import freechips.rocketchip.util.IntToAugmentedInt import freechips.rocketchip.util.SeqToAugmentedSeq import freechips.rocketchip.util.SeqBoolBitwiseOps // TODO: delete this trait once deduplication is smart enough to avoid globally inlining matching circuits trait InlineInstance { self: chisel3.experimental.BaseModule => chisel3.experimental.annotate( new chisel3.experimental.ChiselAnnotation { def toFirrtl: firrtl.annotations.Annotation = firrtl.passes.InlineAnnotation(self.toNamed) } ) } class DCacheErrors(implicit p: Parameters) extends L1HellaCacheBundle()(p) with CanHaveErrors { val correctable = (cacheParams.tagCode.canCorrect || cacheParams.dataCode.canCorrect).option(Valid(UInt(paddrBits.W))) val uncorrectable = (cacheParams.tagCode.canDetect || cacheParams.dataCode.canDetect).option(Valid(UInt(paddrBits.W))) val bus = Valid(UInt(paddrBits.W)) } class DCacheDataReq(implicit p: Parameters) extends L1HellaCacheBundle()(p) { val addr = UInt(untagBits.W) val write = Bool() val wdata = UInt((encBits * rowBytes / eccBytes).W) val wordMask = UInt((rowBytes / subWordBytes).W) val eccMask = UInt((wordBytes / eccBytes).W) val way_en = UInt(nWays.W) } class DCacheDataArray(implicit p: Parameters) extends L1HellaCacheModule()(p) { val io = IO(new Bundle { val req = Flipped(Valid(new DCacheDataReq)) val resp = Output(Vec(nWays, UInt((req.bits.wdata.getWidth).W))) }) require(rowBits % subWordBits == 0, "rowBits must be a multiple of subWordBits") val eccMask = if (eccBits == subWordBits) Seq(true.B) else io.req.bits.eccMask.asBools val wMask = if (nWays == 1) eccMask else (0 until nWays).flatMap(i => eccMask.map(_ && io.req.bits.way_en(i))) val wWords = io.req.bits.wdata.grouped(encBits * (subWordBits / eccBits)) val addr = io.req.bits.addr >> rowOffBits val data_arrays = Seq.tabulate(rowBits / subWordBits) { i => DescribedSRAM( name = s"${tileParams.baseName}_dcache_data_arrays_${i}", desc = "DCache Data Array", size = nSets * cacheBlockBytes / rowBytes, data = Vec(nWays * (subWordBits / eccBits), UInt(encBits.W)) ) } val rdata = for ((array , i) <- data_arrays.zipWithIndex) yield { val valid = io.req.valid && ((data_arrays.size == 1).B || io.req.bits.wordMask(i)) when (valid && io.req.bits.write) { val wMaskSlice = (0 until wMask.size).filter(j => i % (wordBits/subWordBits) == (j % (wordBytes/eccBytes)) / (subWordBytes/eccBytes)).map(wMask(_)) val wData = wWords(i).grouped(encBits) array.write(addr, VecInit((0 until nWays).flatMap(i => wData)), wMaskSlice) } val data = array.read(addr, valid && !io.req.bits.write) data.grouped(subWordBits / 
eccBits).map(_.asUInt).toSeq } (io.resp zip rdata.transpose).foreach { case (resp, data) => resp := data.asUInt } } class DCacheMetadataReq(implicit p: Parameters) extends L1HellaCacheBundle()(p) { val write = Bool() val addr = UInt(vaddrBitsExtended.W) val idx = UInt(idxBits.W) val way_en = UInt(nWays.W) val data = UInt(cacheParams.tagCode.width(new L1Metadata().getWidth).W) } class DCache(staticIdForMetadataUseOnly: Int, val crossing: ClockCrossingType)(implicit p: Parameters) extends HellaCache(staticIdForMetadataUseOnly)(p) { override lazy val module = new DCacheModule(this) } class DCacheTLBPort(implicit p: Parameters) extends CoreBundle()(p) { val req = Flipped(Decoupled(new TLBReq(coreDataBytes.log2))) val s1_resp = Output(new TLBResp(coreDataBytes.log2)) val s2_kill = Input(Bool()) } class DCacheModule(outer: DCache) extends HellaCacheModule(outer) { val tECC = cacheParams.tagCode val dECC = cacheParams.dataCode require(subWordBits % eccBits == 0, "subWordBits must be a multiple of eccBits") require(eccBytes == 1 || !dECC.isInstanceOf[IdentityCode]) require(cacheParams.silentDrop || cacheParams.acquireBeforeRelease, "!silentDrop requires acquireBeforeRelease") val usingRMW = eccBytes > 1 || usingAtomicsInCache val mmioOffset = outer.firstMMIO edge.manager.requireFifo(TLFIFOFixer.allVolatile) // TileLink pipelining MMIO requests val clock_en_reg = Reg(Bool()) io.cpu.clock_enabled := clock_en_reg val gated_clock = if (!cacheParams.clockGate) clock else ClockGate(clock, clock_en_reg, "dcache_clock_gate") class DCacheModuleImpl { // entering gated-clock domain val tlb = Module(new TLB(false, log2Ceil(coreDataBytes), TLBConfig(nTLBSets, nTLBWays, cacheParams.nTLBBasePageSectors, cacheParams.nTLBSuperpages))) val pma_checker = Module(new TLB(false, log2Ceil(coreDataBytes), TLBConfig(nTLBSets, nTLBWays, cacheParams.nTLBBasePageSectors, cacheParams.nTLBSuperpages)) with InlineInstance) // tags val replacer = ReplacementPolicy.fromString(cacheParams.replacementPolicy, nWays) /** Metadata Arbiter: * 0: Tag update on reset * 1: Tag update on ECC error * 2: Tag update on hit * 3: Tag update on refill * 4: Tag update on release * 5: Tag update on flush * 6: Tag update on probe * 7: Tag update on CPU request */ val metaArb = Module(new Arbiter(new DCacheMetadataReq, 8) with InlineInstance) val tag_array = DescribedSRAM( name = s"${tileParams.baseName}_dcache_tag_array", desc = "DCache Tag Array", size = nSets, data = Vec(nWays, chiselTypeOf(metaArb.io.out.bits.data)) ) // data val data = Module(new DCacheDataArray) /** Data Arbiter * 0: data from pending store buffer * 1: data from TL-D refill * 2: release to TL-A * 3: hit path to CPU */ val dataArb = Module(new Arbiter(new DCacheDataReq, 4) with InlineInstance) dataArb.io.in.tail.foreach(_.bits.wdata := dataArb.io.in.head.bits.wdata) // tie off write ports by default data.io.req.bits <> dataArb.io.out.bits data.io.req.valid := dataArb.io.out.valid dataArb.io.out.ready := true.B metaArb.io.out.ready := clock_en_reg val tl_out_a = Wire(chiselTypeOf(tl_out.a)) tl_out.a <> { val a_queue_depth = outer.crossing match { case RationalCrossing(_) => // TODO make this depend on the actual ratio? 
if (cacheParams.separateUncachedResp) (maxUncachedInFlight + 1) / 2 else 2 min maxUncachedInFlight-1 case SynchronousCrossing(BufferParams.none) => 1 // Need some buffering to guarantee livelock freedom case SynchronousCrossing(_) => 0 // Adequate buffering within the crossing case _: AsynchronousCrossing => 0 // Adequate buffering within the crossing case _: CreditedCrossing => 0 // Adequate buffering within the crossing } Queue(tl_out_a, a_queue_depth, flow = true) } val (tl_out_c, release_queue_empty) = if (cacheParams.acquireBeforeRelease) { val q = Module(new Queue(chiselTypeOf(tl_out.c.bits), cacheDataBeats, flow = true)) tl_out.c <> q.io.deq (q.io.enq, q.io.count === 0.U) } else { (tl_out.c, true.B) } val s1_valid = RegNext(io.cpu.req.fire, false.B) val s1_probe = RegNext(tl_out.b.fire, false.B) val probe_bits = RegEnable(tl_out.b.bits, tl_out.b.fire) // TODO has data now :( val s1_nack = WireDefault(false.B) val s1_valid_masked = s1_valid && !io.cpu.s1_kill val s1_valid_not_nacked = s1_valid && !s1_nack val s1_tlb_req_valid = RegNext(io.tlb_port.req.fire, false.B) val s2_tlb_req_valid = RegNext(s1_tlb_req_valid, false.B) val s0_clk_en = metaArb.io.out.valid && !metaArb.io.out.bits.write val s0_req = WireInit(io.cpu.req.bits) s0_req.addr := Cat(metaArb.io.out.bits.addr >> blockOffBits, io.cpu.req.bits.addr(blockOffBits-1,0)) s0_req.idx.foreach(_ := Cat(metaArb.io.out.bits.idx, s0_req.addr(blockOffBits-1, 0))) when (!metaArb.io.in(7).ready) { s0_req.phys := true.B } val s1_req = RegEnable(s0_req, s0_clk_en) val s1_vaddr = Cat(s1_req.idx.getOrElse(s1_req.addr) >> tagLSB, s1_req.addr(tagLSB-1, 0)) val s0_tlb_req = WireInit(io.tlb_port.req.bits) when (!io.tlb_port.req.fire) { s0_tlb_req.passthrough := s0_req.phys s0_tlb_req.vaddr := s0_req.addr s0_tlb_req.size := s0_req.size s0_tlb_req.cmd := s0_req.cmd s0_tlb_req.prv := s0_req.dprv s0_tlb_req.v := s0_req.dv } val s1_tlb_req = RegEnable(s0_tlb_req, s0_clk_en || io.tlb_port.req.valid) val s1_read = isRead(s1_req.cmd) val s1_write = isWrite(s1_req.cmd) val s1_readwrite = s1_read || s1_write val s1_sfence = s1_req.cmd === M_SFENCE || s1_req.cmd === M_HFENCEV || s1_req.cmd === M_HFENCEG val s1_flush_line = s1_req.cmd === M_FLUSH_ALL && s1_req.size(0) val s1_flush_valid = Reg(Bool()) val s1_waw_hazard = Wire(Bool()) val s_ready :: s_voluntary_writeback :: s_probe_rep_dirty :: s_probe_rep_clean :: s_probe_retry :: s_probe_rep_miss :: s_voluntary_write_meta :: s_probe_write_meta :: s_dummy :: s_voluntary_release :: Nil = Enum(10) val supports_flush = outer.flushOnFenceI || coreParams.haveCFlush val flushed = RegInit(true.B) val flushing = RegInit(false.B) val flushing_req = Reg(chiselTypeOf(s1_req)) val cached_grant_wait = RegInit(false.B) val resetting = RegInit(false.B) val flushCounter = RegInit((nSets * (nWays-1)).U(log2Ceil(nSets * nWays).W)) val release_ack_wait = RegInit(false.B) val release_ack_addr = Reg(UInt(paddrBits.W)) val release_state = RegInit(s_ready) val refill_way = Reg(UInt()) val any_pstore_valid = Wire(Bool()) val inWriteback = release_state.isOneOf(s_voluntary_writeback, s_probe_rep_dirty) val releaseWay = Wire(UInt()) io.cpu.req.ready := (release_state === s_ready) && !cached_grant_wait && !s1_nack // I/O MSHRs val uncachedInFlight = RegInit(VecInit(Seq.fill(maxUncachedInFlight)(false.B))) val uncachedReqs = Reg(Vec(maxUncachedInFlight, new HellaCacheReq)) val uncachedResp = WireInit(new HellaCacheReq, DontCare) // hit initiation path val s0_read = isRead(io.cpu.req.bits.cmd) dataArb.io.in(3).valid := io.cpu.req.valid 
&& likelyNeedsRead(io.cpu.req.bits) dataArb.io.in(3).bits := dataArb.io.in(1).bits dataArb.io.in(3).bits.write := false.B dataArb.io.in(3).bits.addr := Cat(io.cpu.req.bits.idx.getOrElse(io.cpu.req.bits.addr) >> tagLSB, io.cpu.req.bits.addr(tagLSB-1, 0)) dataArb.io.in(3).bits.wordMask := { val mask = (subWordBytes.log2 until rowOffBits).foldLeft(1.U) { case (in, i) => val upper_mask = Mux((i >= wordBytes.log2).B || io.cpu.req.bits.size <= i.U, 0.U, ((BigInt(1) << (1 << (i - subWordBytes.log2)))-1).U) val upper = Mux(io.cpu.req.bits.addr(i), in, 0.U) | upper_mask val lower = Mux(io.cpu.req.bits.addr(i), 0.U, in) upper ## lower } Fill(subWordBytes / eccBytes, mask) } dataArb.io.in(3).bits.eccMask := ~0.U((wordBytes / eccBytes).W) dataArb.io.in(3).bits.way_en := ~0.U(nWays.W) when (!dataArb.io.in(3).ready && s0_read) { io.cpu.req.ready := false.B } val s1_did_read = RegEnable(dataArb.io.in(3).ready && (io.cpu.req.valid && needsRead(io.cpu.req.bits)), s0_clk_en) val s1_read_mask = RegEnable(dataArb.io.in(3).bits.wordMask, s0_clk_en) metaArb.io.in(7).valid := io.cpu.req.valid metaArb.io.in(7).bits.write := false.B metaArb.io.in(7).bits.idx := dataArb.io.in(3).bits.addr(idxMSB, idxLSB) metaArb.io.in(7).bits.addr := io.cpu.req.bits.addr metaArb.io.in(7).bits.way_en := metaArb.io.in(4).bits.way_en metaArb.io.in(7).bits.data := metaArb.io.in(4).bits.data when (!metaArb.io.in(7).ready) { io.cpu.req.ready := false.B } // address translation val s1_cmd_uses_tlb = s1_readwrite || s1_flush_line || s1_req.cmd === M_WOK io.ptw <> tlb.io.ptw tlb.io.kill := io.cpu.s2_kill || s2_tlb_req_valid && io.tlb_port.s2_kill tlb.io.req.valid := s1_tlb_req_valid || s1_valid && !io.cpu.s1_kill && s1_cmd_uses_tlb tlb.io.req.bits := s1_tlb_req when (!tlb.io.req.ready && !tlb.io.ptw.resp.valid && !io.cpu.req.bits.phys) { io.cpu.req.ready := false.B } when (!s1_tlb_req_valid && s1_valid && s1_cmd_uses_tlb && tlb.io.resp.miss) { s1_nack := true.B } tlb.io.sfence.valid := s1_valid && !io.cpu.s1_kill && s1_sfence tlb.io.sfence.bits.rs1 := s1_req.size(0) tlb.io.sfence.bits.rs2 := s1_req.size(1) tlb.io.sfence.bits.asid := io.cpu.s1_data.data tlb.io.sfence.bits.addr := s1_req.addr tlb.io.sfence.bits.hv := s1_req.cmd === M_HFENCEV tlb.io.sfence.bits.hg := s1_req.cmd === M_HFENCEG io.tlb_port.req.ready := clock_en_reg io.tlb_port.s1_resp := tlb.io.resp when (s1_tlb_req_valid && s1_valid && !(s1_req.phys && s1_req.no_xcpt)) { s1_nack := true.B } pma_checker.io <> DontCare pma_checker.io.req.bits.passthrough := true.B pma_checker.io.req.bits.vaddr := s1_req.addr pma_checker.io.req.bits.size := s1_req.size pma_checker.io.req.bits.cmd := s1_req.cmd pma_checker.io.req.bits.prv := s1_req.dprv pma_checker.io.req.bits.v := s1_req.dv val s1_paddr = Cat(Mux(s1_tlb_req_valid, s1_req.addr(paddrBits-1, pgIdxBits), tlb.io.resp.paddr >> pgIdxBits), s1_req.addr(pgIdxBits-1, 0)) val s1_victim_way = Wire(UInt()) val (s1_hit_way, s1_hit_state, s1_meta) = if (usingDataScratchpad) { val baseAddr = p(LookupByHartId)(_.dcache.flatMap(_.scratch.map(_.U)), io_hartid.get) | io_mmio_address_prefix.get val inScratchpad = s1_paddr >= baseAddr && s1_paddr < baseAddr + (nSets * cacheBlockBytes).U val hitState = Mux(inScratchpad, ClientMetadata.maximum, ClientMetadata.onReset) val dummyMeta = L1Metadata(0.U, ClientMetadata.onReset) (inScratchpad, hitState, Seq(tECC.encode(dummyMeta.asUInt))) } else { val metaReq = metaArb.io.out val metaIdx = metaReq.bits.idx when (metaReq.valid && metaReq.bits.write) { val wmask = if (nWays == 1) Seq(true.B) else 
metaReq.bits.way_en.asBools tag_array.write(metaIdx, VecInit(Seq.fill(nWays)(metaReq.bits.data)), wmask) } val s1_meta = tag_array.read(metaIdx, metaReq.valid && !metaReq.bits.write) val s1_meta_uncorrected = s1_meta.map(tECC.decode(_).uncorrected.asTypeOf(new L1Metadata)) val s1_tag = s1_paddr >> tagLSB val s1_meta_hit_way = s1_meta_uncorrected.map(r => r.coh.isValid() && r.tag === s1_tag).asUInt val s1_meta_hit_state = ( s1_meta_uncorrected.map(r => Mux(r.tag === s1_tag && !s1_flush_valid, r.coh.asUInt, 0.U)) .reduce (_|_)).asTypeOf(chiselTypeOf(ClientMetadata.onReset)) (s1_meta_hit_way, s1_meta_hit_state, s1_meta) } val s1_data_way = WireDefault(if (nWays == 1) 1.U else Mux(inWriteback, releaseWay, s1_hit_way)) val tl_d_data_encoded = Wire(chiselTypeOf(encodeData(tl_out.d.bits.data, false.B))) val s1_all_data_ways = VecInit(data.io.resp ++ (!cacheParams.separateUncachedResp).option(tl_d_data_encoded)) val s1_mask_xwr = new StoreGen(s1_req.size, s1_req.addr, 0.U, wordBytes).mask val s1_mask = Mux(s1_req.cmd === M_PWR, io.cpu.s1_data.mask, s1_mask_xwr) // for partial writes, s1_data.mask must be a subset of s1_mask_xwr assert(!(s1_valid_masked && s1_req.cmd === M_PWR) || (s1_mask_xwr | ~io.cpu.s1_data.mask).andR) val s2_valid = RegNext(s1_valid_masked && !s1_sfence, init=false.B) val s2_valid_no_xcpt = s2_valid && !io.cpu.s2_xcpt.asUInt.orR val s2_probe = RegNext(s1_probe, init=false.B) val releaseInFlight = s1_probe || s2_probe || release_state =/= s_ready val s2_not_nacked_in_s1 = RegNext(!s1_nack) val s2_valid_not_nacked_in_s1 = s2_valid && s2_not_nacked_in_s1 val s2_valid_masked = s2_valid_no_xcpt && s2_not_nacked_in_s1 val s2_valid_not_killed = s2_valid_masked && !io.cpu.s2_kill val s2_req = Reg(chiselTypeOf(io.cpu.req.bits)) val s2_cmd_flush_all = s2_req.cmd === M_FLUSH_ALL && !s2_req.size(0) val s2_cmd_flush_line = s2_req.cmd === M_FLUSH_ALL && s2_req.size(0) val s2_tlb_xcpt = Reg(chiselTypeOf(tlb.io.resp)) val s2_pma = Reg(chiselTypeOf(tlb.io.resp)) val s2_uncached_resp_addr = Reg(chiselTypeOf(s2_req.addr)) // should be DCE'd in synthesis when (s1_valid_not_nacked || s1_flush_valid) { s2_req := s1_req s2_req.addr := s1_paddr s2_tlb_xcpt := tlb.io.resp s2_pma := Mux(s1_tlb_req_valid, pma_checker.io.resp, tlb.io.resp) } val s2_vaddr = Cat(RegEnable(s1_vaddr, s1_valid_not_nacked || s1_flush_valid) >> tagLSB, s2_req.addr(tagLSB-1, 0)) val s2_read = isRead(s2_req.cmd) val s2_write = isWrite(s2_req.cmd) val s2_readwrite = s2_read || s2_write val s2_flush_valid_pre_tag_ecc = RegNext(s1_flush_valid) val s1_meta_decoded = s1_meta.map(tECC.decode(_)) val s1_meta_clk_en = s1_valid_not_nacked || s1_flush_valid || s1_probe val s2_meta_correctable_errors = s1_meta_decoded.map(m => RegEnable(m.correctable, s1_meta_clk_en)).asUInt val s2_meta_uncorrectable_errors = s1_meta_decoded.map(m => RegEnable(m.uncorrectable, s1_meta_clk_en)).asUInt val s2_meta_error_uncorrectable = s2_meta_uncorrectable_errors.orR val s2_meta_corrected = s1_meta_decoded.map(m => RegEnable(m.corrected, s1_meta_clk_en).asTypeOf(new L1Metadata)) val s2_meta_error = (s2_meta_uncorrectable_errors | s2_meta_correctable_errors).orR val s2_flush_valid = s2_flush_valid_pre_tag_ecc && !s2_meta_error val s2_data = { val wordsPerRow = rowBits / subWordBits val en = s1_valid || inWriteback || io.cpu.replay_next val word_en = Mux(inWriteback, Fill(wordsPerRow, 1.U), Mux(s1_did_read, s1_read_mask, 0.U)) val s1_way_words = s1_all_data_ways.map(_.grouped(dECC.width(eccBits) * (subWordBits / eccBits))) if (cacheParams.pipelineWayMux) { val 
s1_word_en = Mux(io.cpu.replay_next, 0.U, word_en) (for (i <- 0 until wordsPerRow) yield { val s2_way_en = RegEnable(Mux(s1_word_en(i), s1_data_way, 0.U), en) val s2_way_words = (0 until nWays).map(j => RegEnable(s1_way_words(j)(i), en && word_en(i))) (0 until nWays).map(j => Mux(s2_way_en(j), s2_way_words(j), 0.U)).reduce(_|_) }).asUInt } else { val s1_word_en = Mux(!io.cpu.replay_next, word_en, UIntToOH(uncachedResp.addr.extract(log2Up(rowBits/8)-1, log2Up(wordBytes)), wordsPerRow)) (for (i <- 0 until wordsPerRow) yield { RegEnable(Mux1H(Mux(s1_word_en(i), s1_data_way, 0.U), s1_way_words.map(_(i))), en) }).asUInt } } val s2_probe_way = RegEnable(s1_hit_way, s1_probe) val s2_probe_state = RegEnable(s1_hit_state, s1_probe) val s2_hit_way = RegEnable(s1_hit_way, s1_valid_not_nacked) val s2_hit_state = RegEnable(s1_hit_state, s1_valid_not_nacked || s1_flush_valid) val s2_waw_hazard = RegEnable(s1_waw_hazard, s1_valid_not_nacked) val s2_store_merge = Wire(Bool()) val s2_hit_valid = s2_hit_state.isValid() val (s2_hit, s2_grow_param, s2_new_hit_state) = s2_hit_state.onAccess(s2_req.cmd) val s2_data_decoded = decodeData(s2_data) val s2_word_idx = s2_req.addr.extract(log2Up(rowBits/8)-1, log2Up(wordBytes)) val s2_data_error = s2_data_decoded.map(_.error).orR val s2_data_error_uncorrectable = s2_data_decoded.map(_.uncorrectable).orR val s2_data_corrected = (s2_data_decoded.map(_.corrected): Seq[UInt]).asUInt val s2_data_uncorrected = (s2_data_decoded.map(_.uncorrected): Seq[UInt]).asUInt val s2_valid_hit_maybe_flush_pre_data_ecc_and_waw = s2_valid_masked && !s2_meta_error && s2_hit val s2_no_alloc_hazard = if (!usingVM || pgIdxBits >= untagBits) false.B else { // make sure that any in-flight non-allocating accesses are ordered before // any allocating accesses. this can only happen if aliasing is possible. 
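      // Illustrative numbers, not a specific configuration: with 4 KiB pages, pgIdxBits = 12;
      // a cache with 64-byte blocks (blockOffBits = 6) and 128 sets (idxBits = 7) has
      // untagBits = 13 > pgIdxBits, so index bit 12 comes from the virtual address and two
      // virtual aliases of the same physical line can land in different sets, which is why
      // the ordering check below is needed.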
val any_no_alloc_in_flight = Reg(Bool()) when (!uncachedInFlight.asUInt.orR) { any_no_alloc_in_flight := false.B } when (s2_valid && s2_req.no_alloc) { any_no_alloc_in_flight := true.B } val s1_need_check = any_no_alloc_in_flight || s2_valid && s2_req.no_alloc val concerns = (uncachedInFlight zip uncachedReqs) :+ (s2_valid && s2_req.no_alloc, s2_req) val s1_uncached_hits = concerns.map { c => val concern_wmask = new StoreGen(c._2.size, c._2.addr, 0.U, wordBytes).mask val addr_match = (c._2.addr ^ s1_paddr)(pgIdxBits+pgLevelBits-1, wordBytes.log2) === 0.U val mask_match = (concern_wmask & s1_mask_xwr).orR || c._2.cmd === M_PWR || s1_req.cmd === M_PWR val cmd_match = isWrite(c._2.cmd) || isWrite(s1_req.cmd) c._1 && s1_need_check && cmd_match && addr_match && mask_match } val s2_uncached_hits = RegEnable(s1_uncached_hits.asUInt, s1_valid_not_nacked) s2_uncached_hits.orR } val s2_valid_hit_pre_data_ecc_and_waw = s2_valid_hit_maybe_flush_pre_data_ecc_and_waw && s2_readwrite && !s2_no_alloc_hazard val s2_valid_flush_line = s2_valid_hit_maybe_flush_pre_data_ecc_and_waw && s2_cmd_flush_line val s2_valid_hit_pre_data_ecc = s2_valid_hit_pre_data_ecc_and_waw && (!s2_waw_hazard || s2_store_merge) val s2_valid_data_error = s2_valid_hit_pre_data_ecc_and_waw && s2_data_error val s2_valid_hit = s2_valid_hit_pre_data_ecc && !s2_data_error val s2_valid_miss = s2_valid_masked && s2_readwrite && !s2_meta_error && !s2_hit val s2_uncached = !s2_pma.cacheable || s2_req.no_alloc && !s2_pma.must_alloc && !s2_hit_valid val s2_valid_cached_miss = s2_valid_miss && !s2_uncached && !uncachedInFlight.asUInt.orR dontTouch(s2_valid_cached_miss) val s2_want_victimize = (!usingDataScratchpad).B && (s2_valid_cached_miss || s2_valid_flush_line || s2_valid_data_error || s2_flush_valid) val s2_cannot_victimize = !s2_flush_valid && io.cpu.s2_kill val s2_victimize = s2_want_victimize && !s2_cannot_victimize val s2_valid_uncached_pending = s2_valid_miss && s2_uncached && !uncachedInFlight.asUInt.andR val s2_victim_way = UIntToOH(RegEnable(s1_victim_way, s1_valid_not_nacked || s1_flush_valid)) val s2_victim_or_hit_way = Mux(s2_hit_valid, s2_hit_way, s2_victim_way) val s2_victim_tag = Mux(s2_valid_data_error || s2_valid_flush_line, s2_req.addr(paddrBits-1, tagLSB), Mux1H(s2_victim_way, s2_meta_corrected).tag) val s2_victim_state = Mux(s2_hit_valid, s2_hit_state, Mux1H(s2_victim_way, s2_meta_corrected).coh) val (s2_prb_ack_data, s2_report_param, probeNewCoh)= s2_probe_state.onProbe(probe_bits.param) val (s2_victim_dirty, s2_shrink_param, voluntaryNewCoh) = s2_victim_state.onCacheControl(M_FLUSH) dontTouch(s2_victim_dirty) val s2_update_meta = s2_hit_state =/= s2_new_hit_state val s2_dont_nack_uncached = s2_valid_uncached_pending && tl_out_a.ready val s2_dont_nack_misc = s2_valid_masked && !s2_meta_error && (supports_flush.B && s2_cmd_flush_all && flushed && !flushing || supports_flush.B && s2_cmd_flush_line && !s2_hit || s2_req.cmd === M_WOK) io.cpu.s2_nack := s2_valid_no_xcpt && !s2_dont_nack_uncached && !s2_dont_nack_misc && !s2_valid_hit when (io.cpu.s2_nack || (s2_valid_hit_pre_data_ecc_and_waw && s2_update_meta)) { s1_nack := true.B } // tag updates on ECC errors val s2_first_meta_corrected = PriorityMux(s2_meta_correctable_errors, s2_meta_corrected) metaArb.io.in(1).valid := s2_meta_error && (s2_valid_masked || s2_flush_valid_pre_tag_ecc || s2_probe) metaArb.io.in(1).bits.write := true.B metaArb.io.in(1).bits.way_en := s2_meta_uncorrectable_errors | Mux(s2_meta_error_uncorrectable, 0.U, 
PriorityEncoderOH(s2_meta_correctable_errors)) metaArb.io.in(1).bits.idx := Mux(s2_probe, probeIdx(probe_bits), s2_vaddr(idxMSB, idxLSB)) metaArb.io.in(1).bits.addr := Cat(io.cpu.req.bits.addr >> untagBits, metaArb.io.in(1).bits.idx << blockOffBits) metaArb.io.in(1).bits.data := tECC.encode { val new_meta = WireDefault(s2_first_meta_corrected) when (s2_meta_error_uncorrectable) { new_meta.coh := ClientMetadata.onReset } new_meta.asUInt } // tag updates on hit metaArb.io.in(2).valid := s2_valid_hit_pre_data_ecc_and_waw && s2_update_meta metaArb.io.in(2).bits.write := !io.cpu.s2_kill metaArb.io.in(2).bits.way_en := s2_victim_or_hit_way metaArb.io.in(2).bits.idx := s2_vaddr(idxMSB, idxLSB) metaArb.io.in(2).bits.addr := Cat(io.cpu.req.bits.addr >> untagBits, s2_vaddr(idxMSB, 0)) metaArb.io.in(2).bits.data := tECC.encode(L1Metadata(s2_req.addr >> tagLSB, s2_new_hit_state).asUInt) // load reservations and TL error reporting val s2_lr = (usingAtomics && !usingDataScratchpad).B && s2_req.cmd === M_XLR val s2_sc = (usingAtomics && !usingDataScratchpad).B && s2_req.cmd === M_XSC val lrscCount = RegInit(0.U) val lrscValid = lrscCount > lrscBackoff.U val lrscBackingOff = lrscCount > 0.U && !lrscValid val lrscAddr = Reg(UInt()) val lrscAddrMatch = lrscAddr === (s2_req.addr >> blockOffBits) val s2_sc_fail = s2_sc && !(lrscValid && lrscAddrMatch) when ((s2_valid_hit && s2_lr && !cached_grant_wait || s2_valid_cached_miss) && !io.cpu.s2_kill) { lrscCount := Mux(s2_hit, (lrscCycles - 1).U, 0.U) lrscAddr := s2_req.addr >> blockOffBits } when (lrscCount > 0.U) { lrscCount := lrscCount - 1.U } when (s2_valid_not_killed && lrscValid) { lrscCount := lrscBackoff.U } when (s1_probe) { lrscCount := 0.U } // don't perform data correction if it might clobber a recent store val s2_correct = s2_data_error && !any_pstore_valid && !RegNext(any_pstore_valid || s2_valid) && usingDataScratchpad.B // pending store buffer val s2_valid_correct = s2_valid_hit_pre_data_ecc_and_waw && s2_correct && !io.cpu.s2_kill def s2_store_valid_pre_kill = s2_valid_hit && s2_write && !s2_sc_fail def s2_store_valid = s2_store_valid_pre_kill && !io.cpu.s2_kill val pstore1_cmd = RegEnable(s1_req.cmd, s1_valid_not_nacked && s1_write) val pstore1_addr = RegEnable(s1_vaddr, s1_valid_not_nacked && s1_write) val pstore1_data = RegEnable(io.cpu.s1_data.data, s1_valid_not_nacked && s1_write) val pstore1_way = RegEnable(s1_hit_way, s1_valid_not_nacked && s1_write) val pstore1_mask = RegEnable(s1_mask, s1_valid_not_nacked && s1_write) val pstore1_storegen_data = WireDefault(pstore1_data) val pstore1_rmw = usingRMW.B && RegEnable(needsRead(s1_req), s1_valid_not_nacked && s1_write) val pstore1_merge_likely = s2_valid_not_nacked_in_s1 && s2_write && s2_store_merge val pstore1_merge = s2_store_valid && s2_store_merge val pstore2_valid = RegInit(false.B) val pstore_drain_opportunistic = !(io.cpu.req.valid && likelyNeedsRead(io.cpu.req.bits)) && !(s1_valid && s1_waw_hazard) val pstore_drain_on_miss = releaseInFlight || RegNext(io.cpu.s2_nack) val pstore1_held = RegInit(false.B) val pstore1_valid_likely = s2_valid && s2_write || pstore1_held def pstore1_valid_not_rmw(s2_kill: Bool) = s2_valid_hit_pre_data_ecc && s2_write && !s2_kill || pstore1_held val pstore1_valid = s2_store_valid || pstore1_held any_pstore_valid := pstore1_held || pstore2_valid val pstore_drain_structural = pstore1_valid_likely && pstore2_valid && ((s1_valid && s1_write) || pstore1_rmw) assert(pstore1_rmw || pstore1_valid_not_rmw(io.cpu.s2_kill) === pstore1_valid) 
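    /* Simplified picture of the pending-store buffer above (illustrative; the authoritative
     * conditions are should_pstore_drain and advance_pstore1):
     *
     *   s2 store hit -> pstore1 -> (optionally pstore2) -> data array via dataArb port 0
     *
     * The drain write below targets pstore2 when it is valid and pstore1 otherwise
     * (Mux(pstore2_valid, ...)). When eccBytes > 1, pstore1 may instead merge its bytes
     * into pstore2 (s2_store_merge), but only into ECC granules that were already written,
     * to avoid WAW hazards. Draining is opportunistic when no incoming access needs the
     * data read port, and forced while a release is in flight or after a nack
     * (pstore_drain_on_miss).
     */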
ccover(pstore_drain_structural, "STORE_STRUCTURAL_HAZARD", "D$ read-modify-write structural hazard") ccover(pstore1_valid && pstore_drain_on_miss, "STORE_DRAIN_ON_MISS", "D$ store buffer drain on miss") ccover(s1_valid_not_nacked && s1_waw_hazard, "WAW_HAZARD", "D$ write-after-write hazard") def should_pstore_drain(truly: Bool) = { val s2_kill = truly && io.cpu.s2_kill !pstore1_merge_likely && (usingRMW.B && pstore_drain_structural || (((pstore1_valid_not_rmw(s2_kill) && !pstore1_rmw) || pstore2_valid) && (pstore_drain_opportunistic || pstore_drain_on_miss))) } val pstore_drain = should_pstore_drain(true.B) pstore1_held := (s2_store_valid && !s2_store_merge || pstore1_held) && pstore2_valid && !pstore_drain val advance_pstore1 = (pstore1_valid || s2_valid_correct) && (pstore2_valid === pstore_drain) pstore2_valid := pstore2_valid && !pstore_drain || advance_pstore1 val pstore2_addr = RegEnable(Mux(s2_correct, s2_vaddr, pstore1_addr), advance_pstore1) val pstore2_way = RegEnable(Mux(s2_correct, s2_hit_way, pstore1_way), advance_pstore1) val pstore2_storegen_data = { for (i <- 0 until wordBytes) yield RegEnable(pstore1_storegen_data(8*(i+1)-1, 8*i), advance_pstore1 || pstore1_merge && pstore1_mask(i)) }.asUInt val pstore2_storegen_mask = { val mask = Reg(UInt(wordBytes.W)) when (advance_pstore1 || pstore1_merge) { val mergedMask = pstore1_mask | Mux(pstore1_merge, mask, 0.U) mask := ~Mux(s2_correct, 0.U, ~mergedMask) } mask } s2_store_merge := (if (eccBytes == 1) false.B else { ccover(pstore1_merge, "STORE_MERGED", "D$ store merged") // only merge stores to ECC granules that are already stored-to, to avoid // WAW hazards val wordMatch = (eccMask(pstore2_storegen_mask) | ~eccMask(pstore1_mask)).andR val idxMatch = s2_vaddr(untagBits-1, log2Ceil(wordBytes)) === pstore2_addr(untagBits-1, log2Ceil(wordBytes)) val tagMatch = (s2_hit_way & pstore2_way).orR pstore2_valid && wordMatch && idxMatch && tagMatch }) dataArb.io.in(0).valid := should_pstore_drain(false.B) dataArb.io.in(0).bits.write := pstore_drain dataArb.io.in(0).bits.addr := Mux(pstore2_valid, pstore2_addr, pstore1_addr) dataArb.io.in(0).bits.way_en := Mux(pstore2_valid, pstore2_way, pstore1_way) dataArb.io.in(0).bits.wdata := encodeData(Fill(rowWords, Mux(pstore2_valid, pstore2_storegen_data, pstore1_data)), false.B) dataArb.io.in(0).bits.wordMask := { val eccMask = dataArb.io.in(0).bits.eccMask.asBools.grouped(subWordBytes/eccBytes).map(_.orR).toSeq.asUInt val wordMask = UIntToOH(Mux(pstore2_valid, pstore2_addr, pstore1_addr).extract(rowOffBits-1, wordBytes.log2)) FillInterleaved(wordBytes/subWordBytes, wordMask) & Fill(rowBytes/wordBytes, eccMask) } dataArb.io.in(0).bits.eccMask := eccMask(Mux(pstore2_valid, pstore2_storegen_mask, pstore1_mask)) // store->load RAW hazard detection def s1Depends(addr: UInt, mask: UInt) = addr(idxMSB, wordOffBits) === s1_vaddr(idxMSB, wordOffBits) && Mux(s1_write, (eccByteMask(mask) & eccByteMask(s1_mask_xwr)).orR, (mask & s1_mask_xwr).orR) val s1_hazard = (pstore1_valid_likely && s1Depends(pstore1_addr, pstore1_mask)) || (pstore2_valid && s1Depends(pstore2_addr, pstore2_storegen_mask)) val s1_raw_hazard = s1_read && s1_hazard s1_waw_hazard := (if (eccBytes == 1) false.B else { ccover(s1_valid_not_nacked && s1_waw_hazard, "WAW_HAZARD", "D$ write-after-write hazard") s1_write && (s1_hazard || needsRead(s1_req) && !s1_did_read) }) when (s1_valid && s1_raw_hazard) { s1_nack := true.B } // performance hints to processor io.cpu.s2_nack_cause_raw := RegNext(s1_raw_hazard) || !(!s2_waw_hazard || 
s2_store_merge) // Prepare a TileLink request message that initiates a transaction val a_source = PriorityEncoder(~uncachedInFlight.asUInt << mmioOffset) // skip the MSHR val acquire_address = (s2_req.addr >> idxLSB) << idxLSB val access_address = s2_req.addr val a_size = s2_req.size val a_data = Fill(beatWords, pstore1_data) val a_mask = pstore1_mask << (access_address.extract(beatBytes.log2-1, wordBytes.log2) << 3) val get = edge.Get(a_source, access_address, a_size)._2 val put = edge.Put(a_source, access_address, a_size, a_data)._2 val putpartial = edge.Put(a_source, access_address, a_size, a_data, a_mask)._2 val atomics = if (edge.manager.anySupportLogical) { MuxLookup(s2_req.cmd, WireDefault(0.U.asTypeOf(new TLBundleA(edge.bundle))))(Array( M_XA_SWAP -> edge.Logical(a_source, access_address, a_size, a_data, TLAtomics.SWAP)._2, M_XA_XOR -> edge.Logical(a_source, access_address, a_size, a_data, TLAtomics.XOR) ._2, M_XA_OR -> edge.Logical(a_source, access_address, a_size, a_data, TLAtomics.OR) ._2, M_XA_AND -> edge.Logical(a_source, access_address, a_size, a_data, TLAtomics.AND) ._2, M_XA_ADD -> edge.Arithmetic(a_source, access_address, a_size, a_data, TLAtomics.ADD)._2, M_XA_MIN -> edge.Arithmetic(a_source, access_address, a_size, a_data, TLAtomics.MIN)._2, M_XA_MAX -> edge.Arithmetic(a_source, access_address, a_size, a_data, TLAtomics.MAX)._2, M_XA_MINU -> edge.Arithmetic(a_source, access_address, a_size, a_data, TLAtomics.MINU)._2, M_XA_MAXU -> edge.Arithmetic(a_source, access_address, a_size, a_data, TLAtomics.MAXU)._2)) } else { // If no managers support atomics, assert fail if processor asks for them assert (!(tl_out_a.valid && s2_read && s2_write && s2_uncached)) WireDefault(new TLBundleA(edge.bundle), DontCare) } tl_out_a.valid := !io.cpu.s2_kill && (s2_valid_uncached_pending || (s2_valid_cached_miss && !(release_ack_wait && (s2_req.addr ^ release_ack_addr)(((pgIdxBits + pgLevelBits) min paddrBits) - 1, idxLSB) === 0.U) && (cacheParams.acquireBeforeRelease.B && !release_ack_wait && release_queue_empty || !s2_victim_dirty))) tl_out_a.bits := Mux(!s2_uncached, acquire(s2_vaddr, s2_req.addr, s2_grow_param), Mux(!s2_write, get, Mux(s2_req.cmd === M_PWR, putpartial, Mux(!s2_read, put, atomics)))) // Drive APROT Bits tl_out_a.bits.user.lift(AMBAProt).foreach { x => val user_bit_cacheable = s2_pma.cacheable x.privileged := s2_req.dprv === PRV.M.U || user_bit_cacheable // if the address is cacheable, enable outer caches x.bufferable := user_bit_cacheable x.modifiable := user_bit_cacheable x.readalloc := user_bit_cacheable x.writealloc := user_bit_cacheable // Following are always tied off x.fetch := false.B x.secure := true.B } // Set pending bits for outstanding TileLink transaction val a_sel = UIntToOH(a_source, maxUncachedInFlight+mmioOffset) >> mmioOffset when (tl_out_a.fire) { when (s2_uncached) { (a_sel.asBools zip (uncachedInFlight zip uncachedReqs)) foreach { case (s, (f, r)) => when (s) { f := true.B r := s2_req r.cmd := Mux(s2_write, Mux(s2_req.cmd === M_PWR, M_PWR, M_XWR), M_XRD) } } }.otherwise { cached_grant_wait := true.B refill_way := s2_victim_or_hit_way } } // grant val (d_first, d_last, d_done, d_address_inc) = edge.addr_inc(tl_out.d) val (d_opc, grantIsUncached, grantIsUncachedData) = { val uncachedGrantOpcodesSansData = Seq(AccessAck, HintAck) val uncachedGrantOpcodesWithData = Seq(AccessAckData) val uncachedGrantOpcodes = uncachedGrantOpcodesWithData ++ uncachedGrantOpcodesSansData val whole_opc = tl_out.d.bits.opcode if (usingDataScratchpad) { assert(!tl_out.d.valid 
|| whole_opc.isOneOf(uncachedGrantOpcodes)) // the only valid TL-D messages are uncached, so we can do some pruning val opc = whole_opc(uncachedGrantOpcodes.map(_.getWidth).max - 1, 0) val data = DecodeLogic(opc, uncachedGrantOpcodesWithData, uncachedGrantOpcodesSansData) (opc, true.B, data) } else { (whole_opc, whole_opc.isOneOf(uncachedGrantOpcodes), whole_opc.isOneOf(uncachedGrantOpcodesWithData)) } } tl_d_data_encoded := encodeData(tl_out.d.bits.data, tl_out.d.bits.corrupt && !io.ptw.customCSRs.suppressCorruptOnGrantData && !grantIsUncached) val grantIsCached = d_opc.isOneOf(Grant, GrantData) val grantIsVoluntary = d_opc === ReleaseAck // Clears a different pending bit val grantIsRefill = d_opc === GrantData // Writes the data array val grantInProgress = RegInit(false.B) val blockProbeAfterGrantCount = RegInit(0.U) when (blockProbeAfterGrantCount > 0.U) { blockProbeAfterGrantCount := blockProbeAfterGrantCount - 1.U } val canAcceptCachedGrant = !release_state.isOneOf(s_voluntary_writeback, s_voluntary_write_meta, s_voluntary_release) tl_out.d.ready := Mux(grantIsCached, (!d_first || tl_out.e.ready) && canAcceptCachedGrant, true.B) val uncachedRespIdxOH = UIntToOH(tl_out.d.bits.source, maxUncachedInFlight+mmioOffset) >> mmioOffset uncachedResp := Mux1H(uncachedRespIdxOH, uncachedReqs) when (tl_out.d.fire) { when (grantIsCached) { grantInProgress := true.B assert(cached_grant_wait, "A GrantData was unexpected by the dcache.") when(d_last) { cached_grant_wait := false.B grantInProgress := false.B blockProbeAfterGrantCount := (blockProbeAfterGrantCycles - 1).U replacer.miss } } .elsewhen (grantIsUncached) { (uncachedRespIdxOH.asBools zip uncachedInFlight) foreach { case (s, f) => when (s && d_last) { assert(f, "An AccessAck was unexpected by the dcache.") // TODO must handle Ack coming back on same cycle! f := false.B } } when (grantIsUncachedData) { if (!cacheParams.separateUncachedResp) { if (!cacheParams.pipelineWayMux) s1_data_way := 1.U << nWays s2_req.cmd := M_XRD s2_req.size := uncachedResp.size s2_req.signed := uncachedResp.signed s2_req.tag := uncachedResp.tag s2_req.addr := { require(rowOffBits >= beatOffBits) val dontCareBits = s1_paddr >> rowOffBits << rowOffBits dontCareBits | uncachedResp.addr(beatOffBits-1, 0) } s2_uncached_resp_addr := uncachedResp.addr } } } .elsewhen (grantIsVoluntary) { assert(release_ack_wait, "A ReleaseAck was unexpected by the dcache.") // TODO should handle Ack coming back on same cycle! 
release_ack_wait := false.B } } // Finish TileLink transaction by issuing a GrantAck tl_out.e.valid := tl_out.d.valid && d_first && grantIsCached && canAcceptCachedGrant tl_out.e.bits := edge.GrantAck(tl_out.d.bits) assert(tl_out.e.fire === (tl_out.d.fire && d_first && grantIsCached)) // data refill // note this ready-valid signaling ignores E-channel backpressure, which // benignly means the data RAM might occasionally be redundantly written dataArb.io.in(1).valid := tl_out.d.valid && grantIsRefill && canAcceptCachedGrant when (grantIsRefill && !dataArb.io.in(1).ready) { tl_out.e.valid := false.B tl_out.d.ready := false.B } if (!usingDataScratchpad) { dataArb.io.in(1).bits.write := true.B dataArb.io.in(1).bits.addr := (s2_vaddr >> idxLSB) << idxLSB | d_address_inc dataArb.io.in(1).bits.way_en := refill_way dataArb.io.in(1).bits.wdata := tl_d_data_encoded dataArb.io.in(1).bits.wordMask := ~0.U((rowBytes / subWordBytes).W) dataArb.io.in(1).bits.eccMask := ~0.U((wordBytes / eccBytes).W) } else { dataArb.io.in(1).bits := dataArb.io.in(0).bits } // tag updates on refill // ignore backpressure from metaArb, which can only be caused by tag ECC // errors on hit-under-miss. failing to write the new tag will leave the // line invalid, so we'll simply request the line again later. metaArb.io.in(3).valid := grantIsCached && d_done && !tl_out.d.bits.denied metaArb.io.in(3).bits.write := true.B metaArb.io.in(3).bits.way_en := refill_way metaArb.io.in(3).bits.idx := s2_vaddr(idxMSB, idxLSB) metaArb.io.in(3).bits.addr := Cat(io.cpu.req.bits.addr >> untagBits, s2_vaddr(idxMSB, 0)) metaArb.io.in(3).bits.data := tECC.encode(L1Metadata(s2_req.addr >> tagLSB, s2_hit_state.onGrant(s2_req.cmd, tl_out.d.bits.param)).asUInt) if (!cacheParams.separateUncachedResp) { // don't accept uncached grants if there's a structural hazard on s2_data... 
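      // The uncached AccessAckData is muxed through the same s1_all_data_ways / s2_data
      // path as cache hits (tl_d_data_encoded is appended to that vector above), so the
      // grant can only be accepted while that path is idle; the read bubble requested
      // through dataArb port 1 below guarantees the grant eventually makes forward progress.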
val blockUncachedGrant = Reg(Bool()) blockUncachedGrant := dataArb.io.out.valid when (grantIsUncachedData && (blockUncachedGrant || s1_valid)) { tl_out.d.ready := false.B // ...but insert bubble to guarantee grant's eventual forward progress when (tl_out.d.valid) { io.cpu.req.ready := false.B dataArb.io.in(1).valid := true.B dataArb.io.in(1).bits.write := false.B blockUncachedGrant := !dataArb.io.in(1).ready } } } ccover(tl_out.d.valid && !tl_out.d.ready, "BLOCK_D", "D$ D-channel blocked") // Handle an incoming TileLink Probe message val block_probe_for_core_progress = blockProbeAfterGrantCount > 0.U || lrscValid val block_probe_for_pending_release_ack = release_ack_wait && (tl_out.b.bits.address ^ release_ack_addr)(((pgIdxBits + pgLevelBits) min paddrBits) - 1, idxLSB) === 0.U val block_probe_for_ordering = releaseInFlight || block_probe_for_pending_release_ack || grantInProgress metaArb.io.in(6).valid := tl_out.b.valid && (!block_probe_for_core_progress || lrscBackingOff) tl_out.b.ready := metaArb.io.in(6).ready && !(block_probe_for_core_progress || block_probe_for_ordering || s1_valid || s2_valid) metaArb.io.in(6).bits.write := false.B metaArb.io.in(6).bits.idx := probeIdx(tl_out.b.bits) metaArb.io.in(6).bits.addr := Cat(io.cpu.req.bits.addr >> paddrBits, tl_out.b.bits.address) metaArb.io.in(6).bits.way_en := metaArb.io.in(4).bits.way_en metaArb.io.in(6).bits.data := metaArb.io.in(4).bits.data // replacement policy s1_victim_way := (if (replacer.perSet && nWays > 1) { val repl_array = Mem(nSets, UInt(replacer.nBits.W)) val s1_repl_idx = s1_req.addr(idxBits+blockOffBits-1, blockOffBits) val s2_repl_idx = s2_vaddr(idxBits+blockOffBits-1, blockOffBits) val s2_repl_state = Reg(UInt(replacer.nBits.W)) val s2_new_repl_state = replacer.get_next_state(s2_repl_state, OHToUInt(s2_hit_way)) val s2_repl_wen = s2_valid_masked && s2_hit_way.orR && s2_repl_state =/= s2_new_repl_state val s1_repl_state = Mux(s2_repl_wen && s2_repl_idx === s1_repl_idx, s2_new_repl_state, repl_array(s1_repl_idx)) when (s1_valid_not_nacked) { s2_repl_state := s1_repl_state } val waddr = Mux(resetting, flushCounter(idxBits-1, 0), s2_repl_idx) val wdata = Mux(resetting, 0.U, s2_new_repl_state) val wen = resetting || s2_repl_wen when (wen) { repl_array(waddr) := wdata } replacer.get_replace_way(s1_repl_state) } else { replacer.way }) // release val (c_first, c_last, releaseDone, c_count) = edge.count(tl_out_c) val releaseRejected = Wire(Bool()) val s1_release_data_valid = RegNext(dataArb.io.in(2).fire) val s2_release_data_valid = RegNext(s1_release_data_valid && !releaseRejected) releaseRejected := s2_release_data_valid && !tl_out_c.fire val releaseDataBeat = Cat(0.U, c_count) + Mux(releaseRejected, 0.U, s1_release_data_valid + Cat(0.U, s2_release_data_valid)) val nackResponseMessage = edge.ProbeAck(b = probe_bits, reportPermissions = TLPermissions.NtoN) val cleanReleaseMessage = edge.ProbeAck(b = probe_bits, reportPermissions = s2_report_param) val dirtyReleaseMessage = edge.ProbeAck(b = probe_bits, reportPermissions = s2_report_param, data = 0.U) tl_out_c.valid := (s2_release_data_valid || (!cacheParams.silentDrop.B && release_state === s_voluntary_release)) && !(c_first && release_ack_wait) tl_out_c.bits := nackResponseMessage val newCoh = WireDefault(probeNewCoh) releaseWay := s2_probe_way if (!usingDataScratchpad) { when (s2_victimize) { assert(s2_valid_flush_line || s2_flush_valid || io.cpu.s2_nack) val discard_line = s2_valid_flush_line && s2_req.size(1) || s2_flush_valid && flushing_req.size(1) release_state := 
Mux(s2_victim_dirty && !discard_line, s_voluntary_writeback, Mux(!cacheParams.silentDrop.B && !release_ack_wait && release_queue_empty && s2_victim_state.isValid() && (s2_valid_flush_line || s2_flush_valid || s2_readwrite && !s2_hit_valid), s_voluntary_release, s_voluntary_write_meta)) probe_bits := addressToProbe(s2_vaddr, Cat(s2_victim_tag, s2_req.addr(tagLSB-1, idxLSB)) << idxLSB) } when (s2_probe) { val probeNack = WireDefault(true.B) when (s2_meta_error) { release_state := s_probe_retry }.elsewhen (s2_prb_ack_data) { release_state := s_probe_rep_dirty }.elsewhen (s2_probe_state.isValid()) { tl_out_c.valid := true.B tl_out_c.bits := cleanReleaseMessage release_state := Mux(releaseDone, s_probe_write_meta, s_probe_rep_clean) }.otherwise { tl_out_c.valid := true.B probeNack := !releaseDone release_state := Mux(releaseDone, s_ready, s_probe_rep_miss) } when (probeNack) { s1_nack := true.B } } when (release_state === s_probe_retry) { metaArb.io.in(6).valid := true.B metaArb.io.in(6).bits.idx := probeIdx(probe_bits) metaArb.io.in(6).bits.addr := Cat(io.cpu.req.bits.addr >> paddrBits, probe_bits.address) when (metaArb.io.in(6).ready) { release_state := s_ready s1_probe := true.B } } when (release_state === s_probe_rep_miss) { tl_out_c.valid := true.B when (releaseDone) { release_state := s_ready } } when (release_state === s_probe_rep_clean) { tl_out_c.valid := true.B tl_out_c.bits := cleanReleaseMessage when (releaseDone) { release_state := s_probe_write_meta } } when (release_state === s_probe_rep_dirty) { tl_out_c.bits := dirtyReleaseMessage when (releaseDone) { release_state := s_probe_write_meta } } when (release_state.isOneOf(s_voluntary_writeback, s_voluntary_write_meta, s_voluntary_release)) { when (release_state === s_voluntary_release) { tl_out_c.bits := edge.Release(fromSource = 0.U, toAddress = 0.U, lgSize = lgCacheBlockBytes.U, shrinkPermissions = s2_shrink_param)._2 }.otherwise { tl_out_c.bits := edge.Release(fromSource = 0.U, toAddress = 0.U, lgSize = lgCacheBlockBytes.U, shrinkPermissions = s2_shrink_param, data = 0.U)._2 } newCoh := voluntaryNewCoh releaseWay := s2_victim_or_hit_way when (releaseDone) { release_state := s_voluntary_write_meta } when (tl_out_c.fire && c_first) { release_ack_wait := true.B release_ack_addr := probe_bits.address } } tl_out_c.bits.source := probe_bits.source tl_out_c.bits.address := probe_bits.address tl_out_c.bits.data := s2_data_corrected tl_out_c.bits.corrupt := inWriteback && s2_data_error_uncorrectable } tl_out_c.bits.user.lift(AMBAProt).foreach { x => x.fetch := false.B x.secure := true.B x.privileged := true.B x.bufferable := true.B x.modifiable := true.B x.readalloc := true.B x.writealloc := true.B } dataArb.io.in(2).valid := inWriteback && releaseDataBeat < refillCycles.U dataArb.io.in(2).bits := dataArb.io.in(1).bits dataArb.io.in(2).bits.write := false.B dataArb.io.in(2).bits.addr := (probeIdx(probe_bits) << blockOffBits) | (releaseDataBeat(log2Up(refillCycles)-1,0) << rowOffBits) dataArb.io.in(2).bits.wordMask := ~0.U((rowBytes / subWordBytes).W) dataArb.io.in(2).bits.eccMask := ~0.U((wordBytes / eccBytes).W) dataArb.io.in(2).bits.way_en := ~0.U(nWays.W) metaArb.io.in(4).valid := release_state.isOneOf(s_voluntary_write_meta, s_probe_write_meta) metaArb.io.in(4).bits.write := true.B metaArb.io.in(4).bits.way_en := releaseWay metaArb.io.in(4).bits.idx := probeIdx(probe_bits) metaArb.io.in(4).bits.addr := Cat(io.cpu.req.bits.addr >> untagBits, probe_bits.address(idxMSB, 0)) metaArb.io.in(4).bits.data := 
tECC.encode(L1Metadata(tl_out_c.bits.address >> tagLSB, newCoh).asUInt) when (metaArb.io.in(4).fire) { release_state := s_ready } // cached response (io.cpu.resp.bits: Data).waiveAll :<>= (s2_req: Data).waiveAll io.cpu.resp.bits.has_data := s2_read io.cpu.resp.bits.replay := false.B io.cpu.s2_uncached := s2_uncached && !s2_hit io.cpu.s2_paddr := s2_req.addr io.cpu.s2_gpa := s2_tlb_xcpt.gpa io.cpu.s2_gpa_is_pte := s2_tlb_xcpt.gpa_is_pte // report whether there are any outstanding accesses. disregard any // slave-port accesses, since they don't affect local memory ordering. val s1_isSlavePortAccess = s1_req.no_xcpt val s2_isSlavePortAccess = s2_req.no_xcpt io.cpu.ordered := !(s1_valid && !s1_isSlavePortAccess || s2_valid && !s2_isSlavePortAccess || cached_grant_wait || uncachedInFlight.asUInt.orR) io.cpu.store_pending := (cached_grant_wait && isWrite(s2_req.cmd)) || uncachedInFlight.asUInt.orR val s1_xcpt_valid = tlb.io.req.valid && !s1_isSlavePortAccess && !s1_nack io.cpu.s2_xcpt := Mux(RegNext(s1_xcpt_valid), s2_tlb_xcpt, 0.U.asTypeOf(s2_tlb_xcpt)) if (usingDataScratchpad) { assert(!(s2_valid_masked && s2_req.cmd.isOneOf(M_XLR, M_XSC))) } else { ccover(tl_out.b.valid && !tl_out.b.ready, "BLOCK_B", "D$ B-channel blocked") } // uncached response val s1_uncached_data_word = { val word_idx = uncachedResp.addr.extract(log2Up(rowBits/8)-1, log2Up(wordBytes)) val words = tl_out.d.bits.data.grouped(wordBits) words(word_idx) } val s2_uncached_data_word = RegEnable(s1_uncached_data_word, io.cpu.replay_next) val doUncachedResp = RegNext(io.cpu.replay_next) io.cpu.resp.valid := (s2_valid_hit_pre_data_ecc || doUncachedResp) && !s2_data_error io.cpu.replay_next := tl_out.d.fire && grantIsUncachedData && !cacheParams.separateUncachedResp.B when (doUncachedResp) { assert(!s2_valid_hit) io.cpu.resp.bits.replay := true.B io.cpu.resp.bits.addr := s2_uncached_resp_addr } io.cpu.uncached_resp.map { resp => resp.valid := tl_out.d.valid && grantIsUncachedData resp.bits.tag := uncachedResp.tag resp.bits.size := uncachedResp.size resp.bits.signed := uncachedResp.signed resp.bits.data := new LoadGen(uncachedResp.size, uncachedResp.signed, uncachedResp.addr, s1_uncached_data_word, false.B, wordBytes).data resp.bits.data_raw := s1_uncached_data_word when (grantIsUncachedData && !resp.ready) { tl_out.d.ready := false.B } } // load data subword mux/sign extension val s2_data_word = (0 until rowBits by wordBits).map(i => s2_data_uncorrected(wordBits+i-1,i)).reduce(_|_) val s2_data_word_corrected = (0 until rowBits by wordBits).map(i => s2_data_corrected(wordBits+i-1,i)).reduce(_|_) val s2_data_word_possibly_uncached = Mux(cacheParams.pipelineWayMux.B && doUncachedResp, s2_uncached_data_word, 0.U) | s2_data_word val loadgen = new LoadGen(s2_req.size, s2_req.signed, s2_req.addr, s2_data_word_possibly_uncached, s2_sc, wordBytes) io.cpu.resp.bits.data := loadgen.data | s2_sc_fail io.cpu.resp.bits.data_word_bypass := loadgen.wordData io.cpu.resp.bits.data_raw := s2_data_word io.cpu.resp.bits.store_data := pstore1_data // AMOs if (usingRMW) { val amoalus = (0 until coreDataBits / xLen).map { i => val amoalu = Module(new AMOALU(xLen)) amoalu.io.mask := pstore1_mask >> (i * xBytes) amoalu.io.cmd := (if (usingAtomicsInCache) pstore1_cmd else M_XWR) amoalu.io.lhs := s2_data_word >> (i * xLen) amoalu.io.rhs := pstore1_data >> (i * xLen) amoalu } pstore1_storegen_data := (if (!usingDataScratchpad) amoalus.map(_.io.out).asUInt else { val mask = FillInterleaved(8, Mux(s2_correct, 0.U, pstore1_mask)) 
amoalus.map(_.io.out_unmasked).asUInt & mask | s2_data_word_corrected & ~mask }) } else if (!usingAtomics) { assert(!(s1_valid_masked && s1_read && s1_write), "unsupported D$ operation") } if (coreParams.useVector) { edge.manager.managers.foreach { m => // Statically ensure that no-allocate accesses are permitted. // We could consider turning some of these into dynamic PMA checks. require(!m.supportsAcquireB || m.supportsGet, "With a vector unit, cacheable memory must support Get") require(!m.supportsAcquireT || m.supportsPutPartial, "With a vector unit, cacheable memory must support PutPartial") } } // flushes if (!usingDataScratchpad) when (RegNext(reset.asBool)) { resetting := true.B } val flushCounterNext = flushCounter +& 1.U val flushDone = (flushCounterNext >> log2Ceil(nSets)) === nWays.U val flushCounterWrap = flushCounterNext(log2Ceil(nSets)-1, 0) ccover(s2_valid_masked && s2_cmd_flush_all && s2_meta_error, "TAG_ECC_ERROR_DURING_FENCE_I", "D$ ECC error in tag array during cache flush") ccover(s2_valid_masked && s2_cmd_flush_all && s2_data_error, "DATA_ECC_ERROR_DURING_FENCE_I", "D$ ECC error in data array during cache flush") s1_flush_valid := metaArb.io.in(5).fire && !s1_flush_valid && !s2_flush_valid_pre_tag_ecc && release_state === s_ready && !release_ack_wait metaArb.io.in(5).valid := flushing && !flushed metaArb.io.in(5).bits.write := false.B metaArb.io.in(5).bits.idx := flushCounter(idxBits-1, 0) metaArb.io.in(5).bits.addr := Cat(io.cpu.req.bits.addr >> untagBits, metaArb.io.in(5).bits.idx << blockOffBits) metaArb.io.in(5).bits.way_en := metaArb.io.in(4).bits.way_en metaArb.io.in(5).bits.data := metaArb.io.in(4).bits.data // Only flush D$ on FENCE.I if some cached executable regions are untracked. if (supports_flush) { when (s2_valid_masked && s2_cmd_flush_all) { when (!flushed && !io.cpu.s2_kill && !release_ack_wait && !uncachedInFlight.asUInt.orR) { flushing := true.B flushing_req := s2_req } } when (tl_out_a.fire && !s2_uncached) { flushed := false.B } when (flushing) { s1_victim_way := flushCounter >> log2Up(nSets) when (s2_flush_valid) { flushCounter := flushCounterNext when (flushDone) { flushed := true.B if (!isPow2(nWays)) flushCounter := flushCounterWrap } } when (flushed && release_state === s_ready && !release_ack_wait) { flushing := false.B } } } metaArb.io.in(0).valid := resetting metaArb.io.in(0).bits := metaArb.io.in(5).bits metaArb.io.in(0).bits.write := true.B metaArb.io.in(0).bits.way_en := ~0.U(nWays.W) metaArb.io.in(0).bits.data := tECC.encode(L1Metadata(0.U, ClientMetadata.onReset).asUInt) when (resetting) { flushCounter := flushCounterNext when (flushDone) { resetting := false.B if (!isPow2(nWays)) flushCounter := flushCounterWrap } } // gate the clock clock_en_reg := !cacheParams.clockGate.B || io.ptw.customCSRs.disableDCacheClockGate || io.cpu.keep_clock_enabled || metaArb.io.out.valid || // subsumes resetting || flushing s1_probe || s2_probe || s1_valid || s2_valid || io.tlb_port.req.valid || s1_tlb_req_valid || s2_tlb_req_valid || pstore1_held || pstore2_valid || release_state =/= s_ready || release_ack_wait || !release_queue_empty || !tlb.io.req.ready || cached_grant_wait || uncachedInFlight.asUInt.orR || lrscCount > 0.U || blockProbeAfterGrantCount > 0.U // performance events io.cpu.perf.acquire := edge.done(tl_out_a) io.cpu.perf.release := edge.done(tl_out_c) io.cpu.perf.grant := tl_out.d.valid && d_last io.cpu.perf.tlbMiss := io.ptw.req.fire io.cpu.perf.storeBufferEmptyAfterLoad := !( (s1_valid && s1_write) || ((s2_valid && s2_write && 
!s2_waw_hazard) || pstore1_held) || pstore2_valid) io.cpu.perf.storeBufferEmptyAfterStore := !( (s1_valid && s1_write) || (s2_valid && s2_write && pstore1_rmw) || ((s2_valid && s2_write && !s2_waw_hazard || pstore1_held) && pstore2_valid)) io.cpu.perf.canAcceptStoreThenLoad := !( ((s2_valid && s2_write && pstore1_rmw) && (s1_valid && s1_write && !s1_waw_hazard)) || (pstore2_valid && pstore1_valid_likely && (s1_valid && s1_write))) io.cpu.perf.canAcceptStoreThenRMW := io.cpu.perf.canAcceptStoreThenLoad && !pstore2_valid io.cpu.perf.canAcceptLoadThenLoad := !((s1_valid && s1_write && needsRead(s1_req)) && ((s2_valid && s2_write && !s2_waw_hazard || pstore1_held) || pstore2_valid)) io.cpu.perf.blocked := { // stop reporting blocked just before unblocking to avoid overly conservative stalling val beatsBeforeEnd = outer.crossing match { case SynchronousCrossing(_) => 2 case RationalCrossing(_) => 1 // assumes 1 < ratio <= 2; need more bookkeeping for optimal handling of >2 case _: AsynchronousCrossing => 1 // likewise case _: CreditedCrossing => 1 // likewise } val near_end_of_refill = if (cacheBlockBytes / beatBytes <= beatsBeforeEnd) tl_out.d.valid else { val refill_count = RegInit(0.U((cacheBlockBytes / beatBytes).log2.W)) when (tl_out.d.fire && grantIsRefill) { refill_count := refill_count + 1.U } refill_count >= (cacheBlockBytes / beatBytes - beatsBeforeEnd).U } cached_grant_wait && !near_end_of_refill } // report errors val (data_error, data_error_uncorrectable, data_error_addr) = if (usingDataScratchpad) (s2_valid_data_error, s2_data_error_uncorrectable, s2_req.addr) else { (RegNext(tl_out_c.fire && inWriteback && s2_data_error), RegNext(s2_data_error_uncorrectable), probe_bits.address) // This is stable for a cycle after tl_out_c.fire, so don't need a register } { val error_addr = Mux(metaArb.io.in(1).valid, Cat(s2_first_meta_corrected.tag, metaArb.io.in(1).bits.addr(tagLSB-1, idxLSB)), data_error_addr >> idxLSB) << idxLSB io.errors.uncorrectable.foreach { u => u.valid := metaArb.io.in(1).valid && s2_meta_error_uncorrectable || data_error && data_error_uncorrectable u.bits := error_addr } io.errors.correctable.foreach { c => c.valid := metaArb.io.in(1).valid || data_error c.bits := error_addr io.errors.uncorrectable.foreach { u => when (u.valid) { c.valid := false.B } } } io.errors.bus.valid := tl_out.d.fire && (tl_out.d.bits.denied || tl_out.d.bits.corrupt) io.errors.bus.bits := Mux(grantIsCached, s2_req.addr >> idxLSB << idxLSB, 0.U) ccoverNotScratchpad(io.errors.bus.valid && grantIsCached, "D_ERROR_CACHED", "D$ D-channel error, cached") ccover(io.errors.bus.valid && !grantIsCached, "D_ERROR_UNCACHED", "D$ D-channel error, uncached") } if (usingDataScratchpad) { val data_error_cover = Seq( property.CoverBoolean(!data_error, Seq("no_data_error")), property.CoverBoolean(data_error && !data_error_uncorrectable, Seq("data_correctable_error")), property.CoverBoolean(data_error && data_error_uncorrectable, Seq("data_uncorrectable_error"))) val request_source = Seq( property.CoverBoolean(s2_isSlavePortAccess, Seq("from_TL")), property.CoverBoolean(!s2_isSlavePortAccess, Seq("from_CPU"))) property.cover(new property.CrossProperty( Seq(data_error_cover, request_source), Seq(), "MemorySystem;;Scratchpad Memory Bit Flip Cross Covers")) } else { val data_error_type = Seq( property.CoverBoolean(!s2_valid_data_error, Seq("no_data_error")), property.CoverBoolean(s2_valid_data_error && !s2_data_error_uncorrectable, Seq("data_correctable_error")), property.CoverBoolean(s2_valid_data_error && 
s2_data_error_uncorrectable, Seq("data_uncorrectable_error"))) val data_error_dirty = Seq( property.CoverBoolean(!s2_victim_dirty, Seq("data_clean")), property.CoverBoolean(s2_victim_dirty, Seq("data_dirty"))) val request_source = if (supports_flush) { Seq( property.CoverBoolean(!flushing, Seq("access")), property.CoverBoolean(flushing, Seq("during_flush"))) } else { Seq(property.CoverBoolean(true.B, Seq("never_flush"))) } val tag_error_cover = Seq( property.CoverBoolean( !s2_meta_error, Seq("no_tag_error")), property.CoverBoolean( s2_meta_error && !s2_meta_error_uncorrectable, Seq("tag_correctable_error")), property.CoverBoolean( s2_meta_error && s2_meta_error_uncorrectable, Seq("tag_uncorrectable_error"))) property.cover(new property.CrossProperty( Seq(data_error_type, data_error_dirty, request_source, tag_error_cover), Seq(), "MemorySystem;;Cache Memory Bit Flip Cross Covers")) } } // leaving gated-clock domain val dcacheImpl = withClock (gated_clock) { new DCacheModuleImpl } def encodeData(x: UInt, poison: Bool) = x.grouped(eccBits).map(dECC.encode(_, if (dECC.canDetect) poison else false.B)).asUInt def dummyEncodeData(x: UInt) = x.grouped(eccBits).map(dECC.swizzle(_)).asUInt def decodeData(x: UInt) = x.grouped(dECC.width(eccBits)).map(dECC.decode(_)) def eccMask(byteMask: UInt) = byteMask.grouped(eccBytes).map(_.orR).asUInt def eccByteMask(byteMask: UInt) = FillInterleaved(eccBytes, eccMask(byteMask)) def likelyNeedsRead(req: HellaCacheReq) = { val res = !req.cmd.isOneOf(M_XWR, M_PFW) || req.size < log2Ceil(eccBytes).U assert(!needsRead(req) || res) res } def needsRead(req: HellaCacheReq) = isRead(req.cmd) || (isWrite(req.cmd) && (req.cmd === M_PWR || req.size < log2Ceil(eccBytes).U)) def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) = property.cover(cond, s"DCACHE_$label", "MemorySystem;;" + desc) def ccoverNotScratchpad(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) = if (!usingDataScratchpad) ccover(cond, label, desc) require(!usingVM || tagLSB <= pgIdxBits, s"D$$ set size must not exceed ${1<<(pgIdxBits-10)} KiB; got ${(nSets * cacheBlockBytes)>>10} KiB") def tagLSB: Int = untagBits def probeIdx(b: TLBundleB): UInt = b.address(idxMSB, idxLSB) def addressToProbe(vaddr: UInt, paddr: UInt): TLBundleB = { val res = Wire(new TLBundleB(edge.bundle)) res :#= DontCare res.address := paddr res.source := (mmioOffset - 1).U res } def acquire(vaddr: UInt, paddr: UInt, param: UInt): TLBundleA = { if (!edge.manager.anySupportAcquireB) WireDefault(0.U.asTypeOf(new TLBundleA(edge.bundle))) else edge.AcquireBlock(0.U, paddr >> lgCacheBlockBytes << lgCacheBlockBytes, lgCacheBlockBytes.U, param)._2 } } File DescribedSRAM.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3.{Data, SyncReadMem, Vec} import chisel3.util.log2Ceil object DescribedSRAM { def apply[T <: Data]( name: String, desc: String, size: BigInt, // depth data: T ): SyncReadMem[T] = { val mem = SyncReadMem(size, data) mem.suggestName(name) val granWidth = data match { case v: Vec[_] => v.head.getWidth case d => d.getWidth } val uid = 0 Annotated.srams( component = mem, name = name, address_width = log2Ceil(size), data_width = data.getWidth, depth = size, description = desc, write_mask_granularity = granWidth ) mem } } File Edges.scala: // See LICENSE.SiFive for license details. 
package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there exist C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there exist D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? 
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x 
match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! 
def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare 
c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, 
mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := 
DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: 
Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, 
data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
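Aside (not part of the input files above): the beat bookkeeping done by TLEdge.firstlastHelper can be hard to follow when read inline, since numBeats1, the decrementing counter, and the first/last/done outputs are spread across several helpers. Below is a minimal, self-contained Chisel sketch of the same counter pattern. The parameters beatBytes = 8 and maxTransfer = 64 and the module name BeatTracker are hypothetical, chosen only for illustration, and numBeats1 is approximated with a plain shift of ((1 << size) - 1) rather than the UIntToOH1 helper used in the real edge.

import chisel3._
import chisel3.util._

// Illustrative sketch of the first/last/done beat tracking used by firstlastHelper.
// beatBytes and maxTransfer are hypothetical example values, not taken from the files above.
class BeatTracker(beatBytes: Int = 8, maxTransfer: Int = 64) extends Module {
  private val maxLgSize = log2Ceil(maxTransfer)
  val io = IO(new Bundle {
    val fire    = Input(Bool())                           // a beat was accepted this cycle
    val hasData = Input(Bool())                           // data-less messages are single-beat
    val size    = Input(UInt(log2Ceil(maxLgSize + 1).W))  // lgSize of the transfer
    val first   = Output(Bool())
    val last    = Output(Bool())
    val done    = Output(Bool())
  })

  // beats1 = (beats in this transfer) - 1; ((1 << size) - 1) is the byte mask of the
  // transfer, and dividing by beatBytes gives the beat count minus one.
  val byteMask = ((1.U << io.size) - 1.U)(maxLgSize - 1, 0)
  val beats1   = Mux(io.hasData, byteMask >> log2Ceil(beatBytes), 0.U)

  val counter  = RegInit(0.U(log2Up(maxTransfer / beatBytes).W))
  val counter1 = counter - 1.U

  io.first := counter === 0.U
  io.last  := counter === 1.U || beats1 === 0.U
  io.done  := io.last && io.fire

  when (io.fire) {
    counter := Mux(io.first, beats1, counter1)
  }
}

The counter is loaded with beats1 on the first accepted beat and decremented on each subsequent one, so first/last/done are derived without storing the full burst length per message; this is the same idea the edge uses for signals such as d_last and the refill_count bookkeeping in the DCache code above.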
module DCache( // @[DCache.scala:101:7] input clock, // @[DCache.scala:101:7] input reset, // @[DCache.scala:101:7] input auto_out_a_ready, // @[LazyModuleImp.scala:107:25] output auto_out_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_a_bits_param, // @[LazyModuleImp.scala:107:25] output [3:0] auto_out_a_bits_size, // @[LazyModuleImp.scala:107:25] output auto_out_a_bits_source, // @[LazyModuleImp.scala:107:25] output [31:0] auto_out_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_out_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_out_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_out_b_ready, // @[LazyModuleImp.scala:107:25] input auto_out_b_valid, // @[LazyModuleImp.scala:107:25] input [1:0] auto_out_b_bits_param, // @[LazyModuleImp.scala:107:25] input [3:0] auto_out_b_bits_size, // @[LazyModuleImp.scala:107:25] input auto_out_b_bits_source, // @[LazyModuleImp.scala:107:25] input [31:0] auto_out_b_bits_address, // @[LazyModuleImp.scala:107:25] input auto_out_c_ready, // @[LazyModuleImp.scala:107:25] output auto_out_c_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_c_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_c_bits_param, // @[LazyModuleImp.scala:107:25] output [3:0] auto_out_c_bits_size, // @[LazyModuleImp.scala:107:25] output auto_out_c_bits_source, // @[LazyModuleImp.scala:107:25] output [31:0] auto_out_c_bits_address, // @[LazyModuleImp.scala:107:25] output [63:0] auto_out_c_bits_data, // @[LazyModuleImp.scala:107:25] output auto_out_d_ready, // @[LazyModuleImp.scala:107:25] input auto_out_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [1:0] auto_out_d_bits_param, // @[LazyModuleImp.scala:107:25] input [3:0] auto_out_d_bits_size, // @[LazyModuleImp.scala:107:25] input auto_out_d_bits_source, // @[LazyModuleImp.scala:107:25] input [2:0] auto_out_d_bits_sink, // @[LazyModuleImp.scala:107:25] input auto_out_d_bits_denied, // @[LazyModuleImp.scala:107:25] input [63:0] auto_out_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_out_e_ready, // @[LazyModuleImp.scala:107:25] output auto_out_e_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_e_bits_sink, // @[LazyModuleImp.scala:107:25] output io_cpu_req_ready, // @[HellaCache.scala:243:14] input io_cpu_req_valid, // @[HellaCache.scala:243:14] input [39:0] io_cpu_req_bits_addr, // @[HellaCache.scala:243:14] input [6:0] io_cpu_req_bits_tag, // @[HellaCache.scala:243:14] input [4:0] io_cpu_req_bits_cmd, // @[HellaCache.scala:243:14] input [1:0] io_cpu_req_bits_size, // @[HellaCache.scala:243:14] input io_cpu_req_bits_signed, // @[HellaCache.scala:243:14] input [1:0] io_cpu_req_bits_dprv, // @[HellaCache.scala:243:14] input io_cpu_req_bits_dv, // @[HellaCache.scala:243:14] input io_cpu_req_bits_phys, // @[HellaCache.scala:243:14] input io_cpu_s1_kill, // @[HellaCache.scala:243:14] input [63:0] io_cpu_s1_data_data, // @[HellaCache.scala:243:14] input [7:0] io_cpu_s1_data_mask, // @[HellaCache.scala:243:14] output io_cpu_s2_nack, // @[HellaCache.scala:243:14] output io_cpu_resp_valid, // @[HellaCache.scala:243:14] output [39:0] io_cpu_resp_bits_addr, // @[HellaCache.scala:243:14] output [6:0] io_cpu_resp_bits_tag, // @[HellaCache.scala:243:14] output [4:0] io_cpu_resp_bits_cmd, // @[HellaCache.scala:243:14] output [1:0] io_cpu_resp_bits_size, // @[HellaCache.scala:243:14] output 
io_cpu_resp_bits_signed, // @[HellaCache.scala:243:14] output [1:0] io_cpu_resp_bits_dprv, // @[HellaCache.scala:243:14] output io_cpu_resp_bits_dv, // @[HellaCache.scala:243:14] output [63:0] io_cpu_resp_bits_data, // @[HellaCache.scala:243:14] output [7:0] io_cpu_resp_bits_mask, // @[HellaCache.scala:243:14] output io_cpu_resp_bits_replay, // @[HellaCache.scala:243:14] output io_cpu_resp_bits_has_data, // @[HellaCache.scala:243:14] output [63:0] io_cpu_resp_bits_data_word_bypass, // @[HellaCache.scala:243:14] output [63:0] io_cpu_resp_bits_data_raw, // @[HellaCache.scala:243:14] output [63:0] io_cpu_resp_bits_store_data, // @[HellaCache.scala:243:14] output io_cpu_replay_next, // @[HellaCache.scala:243:14] output io_cpu_s2_xcpt_ma_ld, // @[HellaCache.scala:243:14] output io_cpu_s2_xcpt_ma_st, // @[HellaCache.scala:243:14] output io_cpu_s2_xcpt_pf_ld, // @[HellaCache.scala:243:14] output io_cpu_s2_xcpt_pf_st, // @[HellaCache.scala:243:14] output io_cpu_s2_xcpt_ae_ld, // @[HellaCache.scala:243:14] output io_cpu_s2_xcpt_ae_st, // @[HellaCache.scala:243:14] output io_cpu_ordered, // @[HellaCache.scala:243:14] output io_cpu_perf_release, // @[HellaCache.scala:243:14] output io_cpu_perf_grant, // @[HellaCache.scala:243:14] input io_ptw_req_ready, // @[HellaCache.scala:243:14] output io_ptw_req_valid, // @[HellaCache.scala:243:14] output [26:0] io_ptw_req_bits_bits_addr, // @[HellaCache.scala:243:14] output io_ptw_req_bits_bits_need_gpa, // @[HellaCache.scala:243:14] input io_ptw_resp_valid, // @[HellaCache.scala:243:14] input io_ptw_resp_bits_ae_ptw, // @[HellaCache.scala:243:14] input io_ptw_resp_bits_ae_final, // @[HellaCache.scala:243:14] input io_ptw_resp_bits_pf, // @[HellaCache.scala:243:14] input io_ptw_resp_bits_gf, // @[HellaCache.scala:243:14] input io_ptw_resp_bits_hr, // @[HellaCache.scala:243:14] input io_ptw_resp_bits_hw, // @[HellaCache.scala:243:14] input io_ptw_resp_bits_hx, // @[HellaCache.scala:243:14] input [43:0] io_ptw_resp_bits_pte_ppn, // @[HellaCache.scala:243:14] input io_ptw_resp_bits_pte_d, // @[HellaCache.scala:243:14] input io_ptw_resp_bits_pte_a, // @[HellaCache.scala:243:14] input io_ptw_resp_bits_pte_g, // @[HellaCache.scala:243:14] input io_ptw_resp_bits_pte_u, // @[HellaCache.scala:243:14] input io_ptw_resp_bits_pte_x, // @[HellaCache.scala:243:14] input io_ptw_resp_bits_pte_w, // @[HellaCache.scala:243:14] input io_ptw_resp_bits_pte_r, // @[HellaCache.scala:243:14] input io_ptw_resp_bits_pte_v, // @[HellaCache.scala:243:14] input [1:0] io_ptw_resp_bits_level, // @[HellaCache.scala:243:14] input io_ptw_resp_bits_homogeneous, // @[HellaCache.scala:243:14] input [3:0] io_ptw_ptbr_mode, // @[HellaCache.scala:243:14] input io_ptw_status_debug, // @[HellaCache.scala:243:14] input io_ptw_status_mxr, // @[HellaCache.scala:243:14] input io_ptw_status_sum, // @[HellaCache.scala:243:14] input io_ptw_pmp_0_cfg_l, // @[HellaCache.scala:243:14] input [1:0] io_ptw_pmp_0_cfg_a, // @[HellaCache.scala:243:14] input io_ptw_pmp_0_cfg_x, // @[HellaCache.scala:243:14] input io_ptw_pmp_0_cfg_w, // @[HellaCache.scala:243:14] input io_ptw_pmp_0_cfg_r, // @[HellaCache.scala:243:14] input [29:0] io_ptw_pmp_0_addr, // @[HellaCache.scala:243:14] input [31:0] io_ptw_pmp_0_mask, // @[HellaCache.scala:243:14] input io_ptw_pmp_1_cfg_l, // @[HellaCache.scala:243:14] input [1:0] io_ptw_pmp_1_cfg_a, // @[HellaCache.scala:243:14] input io_ptw_pmp_1_cfg_x, // @[HellaCache.scala:243:14] input io_ptw_pmp_1_cfg_w, // @[HellaCache.scala:243:14] input io_ptw_pmp_1_cfg_r, // @[HellaCache.scala:243:14] 
input [29:0] io_ptw_pmp_1_addr, // @[HellaCache.scala:243:14] input [31:0] io_ptw_pmp_1_mask, // @[HellaCache.scala:243:14] input io_ptw_pmp_2_cfg_l, // @[HellaCache.scala:243:14] input [1:0] io_ptw_pmp_2_cfg_a, // @[HellaCache.scala:243:14] input io_ptw_pmp_2_cfg_x, // @[HellaCache.scala:243:14] input io_ptw_pmp_2_cfg_w, // @[HellaCache.scala:243:14] input io_ptw_pmp_2_cfg_r, // @[HellaCache.scala:243:14] input [29:0] io_ptw_pmp_2_addr, // @[HellaCache.scala:243:14] input [31:0] io_ptw_pmp_2_mask, // @[HellaCache.scala:243:14] input io_ptw_pmp_3_cfg_l, // @[HellaCache.scala:243:14] input [1:0] io_ptw_pmp_3_cfg_a, // @[HellaCache.scala:243:14] input io_ptw_pmp_3_cfg_x, // @[HellaCache.scala:243:14] input io_ptw_pmp_3_cfg_w, // @[HellaCache.scala:243:14] input io_ptw_pmp_3_cfg_r, // @[HellaCache.scala:243:14] input [29:0] io_ptw_pmp_3_addr, // @[HellaCache.scala:243:14] input [31:0] io_ptw_pmp_3_mask, // @[HellaCache.scala:243:14] input io_ptw_pmp_4_cfg_l, // @[HellaCache.scala:243:14] input [1:0] io_ptw_pmp_4_cfg_a, // @[HellaCache.scala:243:14] input io_ptw_pmp_4_cfg_x, // @[HellaCache.scala:243:14] input io_ptw_pmp_4_cfg_w, // @[HellaCache.scala:243:14] input io_ptw_pmp_4_cfg_r, // @[HellaCache.scala:243:14] input [29:0] io_ptw_pmp_4_addr, // @[HellaCache.scala:243:14] input [31:0] io_ptw_pmp_4_mask, // @[HellaCache.scala:243:14] input io_ptw_pmp_5_cfg_l, // @[HellaCache.scala:243:14] input [1:0] io_ptw_pmp_5_cfg_a, // @[HellaCache.scala:243:14] input io_ptw_pmp_5_cfg_x, // @[HellaCache.scala:243:14] input io_ptw_pmp_5_cfg_w, // @[HellaCache.scala:243:14] input io_ptw_pmp_5_cfg_r, // @[HellaCache.scala:243:14] input [29:0] io_ptw_pmp_5_addr, // @[HellaCache.scala:243:14] input [31:0] io_ptw_pmp_5_mask, // @[HellaCache.scala:243:14] input io_ptw_pmp_6_cfg_l, // @[HellaCache.scala:243:14] input [1:0] io_ptw_pmp_6_cfg_a, // @[HellaCache.scala:243:14] input io_ptw_pmp_6_cfg_x, // @[HellaCache.scala:243:14] input io_ptw_pmp_6_cfg_w, // @[HellaCache.scala:243:14] input io_ptw_pmp_6_cfg_r, // @[HellaCache.scala:243:14] input [29:0] io_ptw_pmp_6_addr, // @[HellaCache.scala:243:14] input [31:0] io_ptw_pmp_6_mask, // @[HellaCache.scala:243:14] input io_ptw_pmp_7_cfg_l, // @[HellaCache.scala:243:14] input [1:0] io_ptw_pmp_7_cfg_a, // @[HellaCache.scala:243:14] input io_ptw_pmp_7_cfg_x, // @[HellaCache.scala:243:14] input io_ptw_pmp_7_cfg_w, // @[HellaCache.scala:243:14] input io_ptw_pmp_7_cfg_r, // @[HellaCache.scala:243:14] input [29:0] io_ptw_pmp_7_addr, // @[HellaCache.scala:243:14] input [31:0] io_ptw_pmp_7_mask // @[HellaCache.scala:243:14] ); wire [5:0] metaArb_io_in_5_bits_idx; // @[DCache.scala:1017:44] wire io_cpu_s2_xcpt_ma_st_0; // @[DCache.scala:933:24] wire io_cpu_s2_xcpt_ma_ld_0; // @[DCache.scala:933:24] wire io_cpu_s2_xcpt_ae_st_0; // @[DCache.scala:933:24] wire io_cpu_s2_xcpt_ae_ld_0; // @[DCache.scala:933:24] wire io_cpu_s2_xcpt_pf_st_0; // @[DCache.scala:933:24] wire io_cpu_s2_xcpt_pf_ld_0; // @[DCache.scala:933:24] wire [21:0] metaArb_io_in_7_bits_data; // @[DCache.scala:913:97] wire metaArb_io_in_4_valid; // @[package.scala:81:59] wire [11:0] dataArb_io_in_2_bits_addr; // @[DCache.scala:903:72] wire dataArb_io_in_2_valid; // @[DCache.scala:900:41] wire [7:0] releaseWay; // @[DCache.scala:813:14, :863:102, :877:18] wire [3:0] nodeOut_c_bits_size; // @[DCache.scala:859:48, :863:102, :864:52] wire [2:0] nodeOut_c_bits_opcode; // @[DCache.scala:859:48, :863:102, :864:52] wire nodeOut_c_valid; // @[DCache.scala:810:18, :824:21, :826:28, :850:47, :851:22, :854:48, :855:22] wire [5:0] 
metaArb_io_in_6_bits_idx; // @[DCache.scala:772:29, :841:44, :843:33] wire metaArb_io_in_6_valid; // @[DCache.scala:769:26, :841:44, :842:30] wire s1_nack; // @[DCache.scala:571:36, :824:21, :839:24] wire dataArb_io_in_1_bits_write; // @[DCache.scala:727:33, :752:68, :755:29] wire dataArb_io_in_1_valid; // @[DCache.scala:721:26, :752:68, :755:29, :757:32] wire nodeOut_d_ready; // @[DCache.scala:671:18, :722:51, :724:20, :752:68, :753:22] wire [21:0] metaArb_io_in_3_bits_data; // @[DCache.scala:746:134] wire metaArb_io_in_3_valid; // @[Edges.scala:233:22] wire [11:0] dataArb_io_in_1_bits_addr; // @[DCache.scala:728:32] wire [7:0] dataArb_io_in_0_bits_eccMask; // @[DCache.scala:557:47] wire [63:0] dataArb_io_in_0_bits_wdata; // @[DCache.scala:551:63] wire [7:0] dataArb_io_in_0_bits_way_en; // @[DCache.scala:550:38] wire [11:0] _dataArb_io_in_0_bits_wordMask_wordMask_T; // @[DCache.scala:549:36] wire dataArb_io_in_0_valid; // @[DCache.scala:517:44] wire pstore_drain; // @[DCache.scala:517:44] wire [21:0] metaArb_io_in_2_bits_data; // @[DCache.scala:467:97] wire [5:0] metaArb_io_in_3_bits_idx; // @[DCache.scala:453:76] wire [5:0] metaArb_io_in_4_bits_idx; // @[DCache.scala:1200:47] wire metaArb_io_in_2_valid; // @[DCache.scala:446:62] wire [7:0] s2_victim_or_hit_way; // @[DCache.scala:432:33] reg [39:0] s2_req_addr; // @[DCache.scala:339:19] wire rockettile_dcache_tag_array_s1_meta_en; // @[DCache.scala:314:59] wire rockettile_dcache_tag_array_MPORT_en; // @[DCache.scala:310:27] wire [5:0] metaArb_io_in_7_bits_idx; // @[DCache.scala:263:58] wire [11:0] dataArb_io_in_3_bits_addr; // @[DCache.scala:245:30] wire dataArb_io_in_3_valid; // @[DCache.scala:242:46] reg [7:0] refill_way; // @[DCache.scala:229:23] reg resetting; // @[DCache.scala:224:26] reg [1:0] s1_tlb_req_prv; // @[DCache.scala:208:29] reg s1_tlb_req_passthrough; // @[DCache.scala:208:29] reg [39:0] s1_tlb_req_vaddr; // @[DCache.scala:208:29] wire [63:0] _amoalus_0_io_out; // @[DCache.scala:982:26] wire [63:0] _data_io_resp_0; // @[DCache.scala:145:20] wire [63:0] _data_io_resp_1; // @[DCache.scala:145:20] wire [63:0] _data_io_resp_2; // @[DCache.scala:145:20] wire [63:0] _data_io_resp_3; // @[DCache.scala:145:20] wire [63:0] _data_io_resp_4; // @[DCache.scala:145:20] wire [63:0] _data_io_resp_5; // @[DCache.scala:145:20] wire [63:0] _data_io_resp_6; // @[DCache.scala:145:20] wire [63:0] _data_io_resp_7; // @[DCache.scala:145:20] wire [175:0] _rockettile_dcache_tag_array_RW0_rdata; // @[DescribedSRAM.scala:17:26] wire _lfsr_prng_io_out_0; // @[PRNG.scala:91:22] wire _lfsr_prng_io_out_1; // @[PRNG.scala:91:22] wire _lfsr_prng_io_out_2; // @[PRNG.scala:91:22] wire [19:0] _tlb_entries_barrier_12_io_y_ppn; // @[package.scala:267:25] wire _tlb_entries_barrier_12_io_y_u; // @[package.scala:267:25] wire _tlb_entries_barrier_12_io_y_ae_ptw; // @[package.scala:267:25] wire _tlb_entries_barrier_12_io_y_ae_final; // @[package.scala:267:25] wire _tlb_entries_barrier_12_io_y_pf; // @[package.scala:267:25] wire _tlb_entries_barrier_12_io_y_gf; // @[package.scala:267:25] wire _tlb_entries_barrier_12_io_y_sw; // @[package.scala:267:25] wire _tlb_entries_barrier_12_io_y_sx; // @[package.scala:267:25] wire _tlb_entries_barrier_12_io_y_sr; // @[package.scala:267:25] wire [19:0] _tlb_entries_barrier_11_io_y_ppn; // @[package.scala:267:25] wire _tlb_entries_barrier_11_io_y_u; // @[package.scala:267:25] wire _tlb_entries_barrier_11_io_y_ae_ptw; // @[package.scala:267:25] wire _tlb_entries_barrier_11_io_y_ae_final; // @[package.scala:267:25] wire 
_tlb_entries_barrier_11_io_y_pf; // @[package.scala:267:25] wire _tlb_entries_barrier_11_io_y_gf; // @[package.scala:267:25] wire _tlb_entries_barrier_11_io_y_sw; // @[package.scala:267:25] wire _tlb_entries_barrier_11_io_y_sx; // @[package.scala:267:25] wire _tlb_entries_barrier_11_io_y_sr; // @[package.scala:267:25] wire _tlb_entries_barrier_11_io_y_pw; // @[package.scala:267:25] wire _tlb_entries_barrier_11_io_y_pr; // @[package.scala:267:25] wire _tlb_entries_barrier_11_io_y_ppp; // @[package.scala:267:25] wire _tlb_entries_barrier_11_io_y_pal; // @[package.scala:267:25] wire _tlb_entries_barrier_11_io_y_paa; // @[package.scala:267:25] wire _tlb_entries_barrier_11_io_y_eff; // @[package.scala:267:25] wire _tlb_entries_barrier_11_io_y_c; // @[package.scala:267:25] wire [19:0] _tlb_entries_barrier_10_io_y_ppn; // @[package.scala:267:25] wire _tlb_entries_barrier_10_io_y_u; // @[package.scala:267:25] wire _tlb_entries_barrier_10_io_y_ae_ptw; // @[package.scala:267:25] wire _tlb_entries_barrier_10_io_y_ae_final; // @[package.scala:267:25] wire _tlb_entries_barrier_10_io_y_pf; // @[package.scala:267:25] wire _tlb_entries_barrier_10_io_y_gf; // @[package.scala:267:25] wire _tlb_entries_barrier_10_io_y_sw; // @[package.scala:267:25] wire _tlb_entries_barrier_10_io_y_sx; // @[package.scala:267:25] wire _tlb_entries_barrier_10_io_y_sr; // @[package.scala:267:25] wire _tlb_entries_barrier_10_io_y_pw; // @[package.scala:267:25] wire _tlb_entries_barrier_10_io_y_pr; // @[package.scala:267:25] wire _tlb_entries_barrier_10_io_y_ppp; // @[package.scala:267:25] wire _tlb_entries_barrier_10_io_y_pal; // @[package.scala:267:25] wire _tlb_entries_barrier_10_io_y_paa; // @[package.scala:267:25] wire _tlb_entries_barrier_10_io_y_eff; // @[package.scala:267:25] wire _tlb_entries_barrier_10_io_y_c; // @[package.scala:267:25] wire [19:0] _tlb_entries_barrier_9_io_y_ppn; // @[package.scala:267:25] wire _tlb_entries_barrier_9_io_y_u; // @[package.scala:267:25] wire _tlb_entries_barrier_9_io_y_ae_ptw; // @[package.scala:267:25] wire _tlb_entries_barrier_9_io_y_ae_final; // @[package.scala:267:25] wire _tlb_entries_barrier_9_io_y_pf; // @[package.scala:267:25] wire _tlb_entries_barrier_9_io_y_gf; // @[package.scala:267:25] wire _tlb_entries_barrier_9_io_y_sw; // @[package.scala:267:25] wire _tlb_entries_barrier_9_io_y_sx; // @[package.scala:267:25] wire _tlb_entries_barrier_9_io_y_sr; // @[package.scala:267:25] wire _tlb_entries_barrier_9_io_y_pw; // @[package.scala:267:25] wire _tlb_entries_barrier_9_io_y_pr; // @[package.scala:267:25] wire _tlb_entries_barrier_9_io_y_ppp; // @[package.scala:267:25] wire _tlb_entries_barrier_9_io_y_pal; // @[package.scala:267:25] wire _tlb_entries_barrier_9_io_y_paa; // @[package.scala:267:25] wire _tlb_entries_barrier_9_io_y_eff; // @[package.scala:267:25] wire _tlb_entries_barrier_9_io_y_c; // @[package.scala:267:25] wire [19:0] _tlb_entries_barrier_8_io_y_ppn; // @[package.scala:267:25] wire _tlb_entries_barrier_8_io_y_u; // @[package.scala:267:25] wire _tlb_entries_barrier_8_io_y_ae_ptw; // @[package.scala:267:25] wire _tlb_entries_barrier_8_io_y_ae_final; // @[package.scala:267:25] wire _tlb_entries_barrier_8_io_y_pf; // @[package.scala:267:25] wire _tlb_entries_barrier_8_io_y_gf; // @[package.scala:267:25] wire _tlb_entries_barrier_8_io_y_sw; // @[package.scala:267:25] wire _tlb_entries_barrier_8_io_y_sx; // @[package.scala:267:25] wire _tlb_entries_barrier_8_io_y_sr; // @[package.scala:267:25] wire _tlb_entries_barrier_8_io_y_pw; // @[package.scala:267:25] wire 
_tlb_entries_barrier_8_io_y_pr; // @[package.scala:267:25] wire _tlb_entries_barrier_8_io_y_ppp; // @[package.scala:267:25] wire _tlb_entries_barrier_8_io_y_pal; // @[package.scala:267:25] wire _tlb_entries_barrier_8_io_y_paa; // @[package.scala:267:25] wire _tlb_entries_barrier_8_io_y_eff; // @[package.scala:267:25] wire _tlb_entries_barrier_8_io_y_c; // @[package.scala:267:25] wire [19:0] _tlb_entries_barrier_7_io_y_ppn; // @[package.scala:267:25] wire _tlb_entries_barrier_7_io_y_u; // @[package.scala:267:25] wire _tlb_entries_barrier_7_io_y_ae_ptw; // @[package.scala:267:25] wire _tlb_entries_barrier_7_io_y_ae_final; // @[package.scala:267:25] wire _tlb_entries_barrier_7_io_y_pf; // @[package.scala:267:25] wire _tlb_entries_barrier_7_io_y_gf; // @[package.scala:267:25] wire _tlb_entries_barrier_7_io_y_sw; // @[package.scala:267:25] wire _tlb_entries_barrier_7_io_y_sx; // @[package.scala:267:25] wire _tlb_entries_barrier_7_io_y_sr; // @[package.scala:267:25] wire _tlb_entries_barrier_7_io_y_pw; // @[package.scala:267:25] wire _tlb_entries_barrier_7_io_y_pr; // @[package.scala:267:25] wire _tlb_entries_barrier_7_io_y_ppp; // @[package.scala:267:25] wire _tlb_entries_barrier_7_io_y_pal; // @[package.scala:267:25] wire _tlb_entries_barrier_7_io_y_paa; // @[package.scala:267:25] wire _tlb_entries_barrier_7_io_y_eff; // @[package.scala:267:25] wire _tlb_entries_barrier_7_io_y_c; // @[package.scala:267:25] wire [19:0] _tlb_entries_barrier_6_io_y_ppn; // @[package.scala:267:25] wire _tlb_entries_barrier_6_io_y_u; // @[package.scala:267:25] wire _tlb_entries_barrier_6_io_y_ae_ptw; // @[package.scala:267:25] wire _tlb_entries_barrier_6_io_y_ae_final; // @[package.scala:267:25] wire _tlb_entries_barrier_6_io_y_pf; // @[package.scala:267:25] wire _tlb_entries_barrier_6_io_y_gf; // @[package.scala:267:25] wire _tlb_entries_barrier_6_io_y_sw; // @[package.scala:267:25] wire _tlb_entries_barrier_6_io_y_sx; // @[package.scala:267:25] wire _tlb_entries_barrier_6_io_y_sr; // @[package.scala:267:25] wire _tlb_entries_barrier_6_io_y_pw; // @[package.scala:267:25] wire _tlb_entries_barrier_6_io_y_pr; // @[package.scala:267:25] wire _tlb_entries_barrier_6_io_y_ppp; // @[package.scala:267:25] wire _tlb_entries_barrier_6_io_y_pal; // @[package.scala:267:25] wire _tlb_entries_barrier_6_io_y_paa; // @[package.scala:267:25] wire _tlb_entries_barrier_6_io_y_eff; // @[package.scala:267:25] wire _tlb_entries_barrier_6_io_y_c; // @[package.scala:267:25] wire [19:0] _tlb_entries_barrier_5_io_y_ppn; // @[package.scala:267:25] wire _tlb_entries_barrier_5_io_y_u; // @[package.scala:267:25] wire _tlb_entries_barrier_5_io_y_ae_ptw; // @[package.scala:267:25] wire _tlb_entries_barrier_5_io_y_ae_final; // @[package.scala:267:25] wire _tlb_entries_barrier_5_io_y_pf; // @[package.scala:267:25] wire _tlb_entries_barrier_5_io_y_gf; // @[package.scala:267:25] wire _tlb_entries_barrier_5_io_y_sw; // @[package.scala:267:25] wire _tlb_entries_barrier_5_io_y_sx; // @[package.scala:267:25] wire _tlb_entries_barrier_5_io_y_sr; // @[package.scala:267:25] wire _tlb_entries_barrier_5_io_y_pw; // @[package.scala:267:25] wire _tlb_entries_barrier_5_io_y_pr; // @[package.scala:267:25] wire _tlb_entries_barrier_5_io_y_ppp; // @[package.scala:267:25] wire _tlb_entries_barrier_5_io_y_pal; // @[package.scala:267:25] wire _tlb_entries_barrier_5_io_y_paa; // @[package.scala:267:25] wire _tlb_entries_barrier_5_io_y_eff; // @[package.scala:267:25] wire _tlb_entries_barrier_5_io_y_c; // @[package.scala:267:25] wire [19:0] 
_tlb_entries_barrier_4_io_y_ppn; // @[package.scala:267:25] wire _tlb_entries_barrier_4_io_y_u; // @[package.scala:267:25] wire _tlb_entries_barrier_4_io_y_ae_ptw; // @[package.scala:267:25] wire _tlb_entries_barrier_4_io_y_ae_final; // @[package.scala:267:25] wire _tlb_entries_barrier_4_io_y_pf; // @[package.scala:267:25] wire _tlb_entries_barrier_4_io_y_gf; // @[package.scala:267:25] wire _tlb_entries_barrier_4_io_y_sw; // @[package.scala:267:25] wire _tlb_entries_barrier_4_io_y_sx; // @[package.scala:267:25] wire _tlb_entries_barrier_4_io_y_sr; // @[package.scala:267:25] wire _tlb_entries_barrier_4_io_y_pw; // @[package.scala:267:25] wire _tlb_entries_barrier_4_io_y_pr; // @[package.scala:267:25] wire _tlb_entries_barrier_4_io_y_ppp; // @[package.scala:267:25] wire _tlb_entries_barrier_4_io_y_pal; // @[package.scala:267:25] wire _tlb_entries_barrier_4_io_y_paa; // @[package.scala:267:25] wire _tlb_entries_barrier_4_io_y_eff; // @[package.scala:267:25] wire _tlb_entries_barrier_4_io_y_c; // @[package.scala:267:25] wire [19:0] _tlb_entries_barrier_3_io_y_ppn; // @[package.scala:267:25] wire _tlb_entries_barrier_3_io_y_u; // @[package.scala:267:25] wire _tlb_entries_barrier_3_io_y_ae_ptw; // @[package.scala:267:25] wire _tlb_entries_barrier_3_io_y_ae_final; // @[package.scala:267:25] wire _tlb_entries_barrier_3_io_y_pf; // @[package.scala:267:25] wire _tlb_entries_barrier_3_io_y_gf; // @[package.scala:267:25] wire _tlb_entries_barrier_3_io_y_sw; // @[package.scala:267:25] wire _tlb_entries_barrier_3_io_y_sx; // @[package.scala:267:25] wire _tlb_entries_barrier_3_io_y_sr; // @[package.scala:267:25] wire _tlb_entries_barrier_3_io_y_pw; // @[package.scala:267:25] wire _tlb_entries_barrier_3_io_y_pr; // @[package.scala:267:25] wire _tlb_entries_barrier_3_io_y_ppp; // @[package.scala:267:25] wire _tlb_entries_barrier_3_io_y_pal; // @[package.scala:267:25] wire _tlb_entries_barrier_3_io_y_paa; // @[package.scala:267:25] wire _tlb_entries_barrier_3_io_y_eff; // @[package.scala:267:25] wire _tlb_entries_barrier_3_io_y_c; // @[package.scala:267:25] wire [19:0] _tlb_entries_barrier_2_io_y_ppn; // @[package.scala:267:25] wire _tlb_entries_barrier_2_io_y_u; // @[package.scala:267:25] wire _tlb_entries_barrier_2_io_y_ae_ptw; // @[package.scala:267:25] wire _tlb_entries_barrier_2_io_y_ae_final; // @[package.scala:267:25] wire _tlb_entries_barrier_2_io_y_pf; // @[package.scala:267:25] wire _tlb_entries_barrier_2_io_y_gf; // @[package.scala:267:25] wire _tlb_entries_barrier_2_io_y_sw; // @[package.scala:267:25] wire _tlb_entries_barrier_2_io_y_sx; // @[package.scala:267:25] wire _tlb_entries_barrier_2_io_y_sr; // @[package.scala:267:25] wire _tlb_entries_barrier_2_io_y_pw; // @[package.scala:267:25] wire _tlb_entries_barrier_2_io_y_pr; // @[package.scala:267:25] wire _tlb_entries_barrier_2_io_y_ppp; // @[package.scala:267:25] wire _tlb_entries_barrier_2_io_y_pal; // @[package.scala:267:25] wire _tlb_entries_barrier_2_io_y_paa; // @[package.scala:267:25] wire _tlb_entries_barrier_2_io_y_eff; // @[package.scala:267:25] wire _tlb_entries_barrier_2_io_y_c; // @[package.scala:267:25] wire [19:0] _tlb_entries_barrier_1_io_y_ppn; // @[package.scala:267:25] wire _tlb_entries_barrier_1_io_y_u; // @[package.scala:267:25] wire _tlb_entries_barrier_1_io_y_ae_ptw; // @[package.scala:267:25] wire _tlb_entries_barrier_1_io_y_ae_final; // @[package.scala:267:25] wire _tlb_entries_barrier_1_io_y_pf; // @[package.scala:267:25] wire _tlb_entries_barrier_1_io_y_gf; // @[package.scala:267:25] wire 
_tlb_entries_barrier_1_io_y_sw; // @[package.scala:267:25] wire _tlb_entries_barrier_1_io_y_sx; // @[package.scala:267:25] wire _tlb_entries_barrier_1_io_y_sr; // @[package.scala:267:25] wire _tlb_entries_barrier_1_io_y_pw; // @[package.scala:267:25] wire _tlb_entries_barrier_1_io_y_pr; // @[package.scala:267:25] wire _tlb_entries_barrier_1_io_y_ppp; // @[package.scala:267:25] wire _tlb_entries_barrier_1_io_y_pal; // @[package.scala:267:25] wire _tlb_entries_barrier_1_io_y_paa; // @[package.scala:267:25] wire _tlb_entries_barrier_1_io_y_eff; // @[package.scala:267:25] wire _tlb_entries_barrier_1_io_y_c; // @[package.scala:267:25] wire [19:0] _tlb_entries_barrier_io_y_ppn; // @[package.scala:267:25] wire _tlb_entries_barrier_io_y_u; // @[package.scala:267:25] wire _tlb_entries_barrier_io_y_ae_ptw; // @[package.scala:267:25] wire _tlb_entries_barrier_io_y_ae_final; // @[package.scala:267:25] wire _tlb_entries_barrier_io_y_pf; // @[package.scala:267:25] wire _tlb_entries_barrier_io_y_gf; // @[package.scala:267:25] wire _tlb_entries_barrier_io_y_sw; // @[package.scala:267:25] wire _tlb_entries_barrier_io_y_sx; // @[package.scala:267:25] wire _tlb_entries_barrier_io_y_sr; // @[package.scala:267:25] wire _tlb_entries_barrier_io_y_pw; // @[package.scala:267:25] wire _tlb_entries_barrier_io_y_pr; // @[package.scala:267:25] wire _tlb_entries_barrier_io_y_ppp; // @[package.scala:267:25] wire _tlb_entries_barrier_io_y_pal; // @[package.scala:267:25] wire _tlb_entries_barrier_io_y_paa; // @[package.scala:267:25] wire _tlb_entries_barrier_io_y_eff; // @[package.scala:267:25] wire _tlb_entries_barrier_io_y_c; // @[package.scala:267:25] wire _tlb_pma_io_resp_cacheable; // @[TLB.scala:422:19] wire _tlb_pma_io_resp_r; // @[TLB.scala:422:19] wire _tlb_pma_io_resp_w; // @[TLB.scala:422:19] wire _tlb_pma_io_resp_pp; // @[TLB.scala:422:19] wire _tlb_pma_io_resp_al; // @[TLB.scala:422:19] wire _tlb_pma_io_resp_aa; // @[TLB.scala:422:19] wire _tlb_pma_io_resp_x; // @[TLB.scala:422:19] wire _tlb_pma_io_resp_eff; // @[TLB.scala:422:19] wire _tlb_pmp_io_r; // @[TLB.scala:416:19] wire _tlb_pmp_io_w; // @[TLB.scala:416:19] wire _tlb_pmp_io_x; // @[TLB.scala:416:19] wire [19:0] _tlb_mpu_ppn_barrier_io_y_ppn; // @[package.scala:267:25] reg [26:0] tlb_sectored_entries_0_0_tag_vpn; // @[TLB.scala:339:29] reg tlb_; // @[TLB.scala:339:29] reg [41:0] tlb_sectored_entries_0_0_data_0; // @[TLB.scala:339:29] reg [41:0] tlb_sectored_entries_0_0_data_1; // @[TLB.scala:339:29] reg [41:0] tlb_sectored_entries_0_0_data_2; // @[TLB.scala:339:29] reg [41:0] tlb_sectored_entries_0_0_data_3; // @[TLB.scala:339:29] reg tlb_sectored_entries_0_0_valid_0; // @[TLB.scala:339:29] reg tlb_sectored_entries_0_0_valid_1; // @[TLB.scala:339:29] reg tlb_sectored_entries_0_0_valid_2; // @[TLB.scala:339:29] reg tlb_sectored_entries_0_0_valid_3; // @[TLB.scala:339:29] reg [26:0] tlb_sectored_entries_0_1_tag_vpn; // @[TLB.scala:339:29] reg tlb__0; // @[TLB.scala:339:29] reg [41:0] tlb_sectored_entries_0_1_data_0; // @[TLB.scala:339:29] reg [41:0] tlb_sectored_entries_0_1_data_1; // @[TLB.scala:339:29] reg [41:0] tlb_sectored_entries_0_1_data_2; // @[TLB.scala:339:29] reg [41:0] tlb_sectored_entries_0_1_data_3; // @[TLB.scala:339:29] reg tlb_sectored_entries_0_1_valid_0; // @[TLB.scala:339:29] reg tlb_sectored_entries_0_1_valid_1; // @[TLB.scala:339:29] reg tlb_sectored_entries_0_1_valid_2; // @[TLB.scala:339:29] reg tlb_sectored_entries_0_1_valid_3; // @[TLB.scala:339:29] reg [26:0] tlb_sectored_entries_0_2_tag_vpn; // @[TLB.scala:339:29] reg tlb__1; 
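// Sectored TLB entry register file (cont.): each of the eight entries in the set holds a 27-bit tag VPN, four 42-bit sector data words, and four per-sector valid bits.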
// @[TLB.scala:339:29] reg [41:0] tlb_sectored_entries_0_2_data_0; // @[TLB.scala:339:29] reg [41:0] tlb_sectored_entries_0_2_data_1; // @[TLB.scala:339:29] reg [41:0] tlb_sectored_entries_0_2_data_2; // @[TLB.scala:339:29] reg [41:0] tlb_sectored_entries_0_2_data_3; // @[TLB.scala:339:29] reg tlb_sectored_entries_0_2_valid_0; // @[TLB.scala:339:29] reg tlb_sectored_entries_0_2_valid_1; // @[TLB.scala:339:29] reg tlb_sectored_entries_0_2_valid_2; // @[TLB.scala:339:29] reg tlb_sectored_entries_0_2_valid_3; // @[TLB.scala:339:29] reg [26:0] tlb_sectored_entries_0_3_tag_vpn; // @[TLB.scala:339:29] reg tlb__2; // @[TLB.scala:339:29] reg [41:0] tlb_sectored_entries_0_3_data_0; // @[TLB.scala:339:29] reg [41:0] tlb_sectored_entries_0_3_data_1; // @[TLB.scala:339:29] reg [41:0] tlb_sectored_entries_0_3_data_2; // @[TLB.scala:339:29] reg [41:0] tlb_sectored_entries_0_3_data_3; // @[TLB.scala:339:29] reg tlb_sectored_entries_0_3_valid_0; // @[TLB.scala:339:29] reg tlb_sectored_entries_0_3_valid_1; // @[TLB.scala:339:29] reg tlb_sectored_entries_0_3_valid_2; // @[TLB.scala:339:29] reg tlb_sectored_entries_0_3_valid_3; // @[TLB.scala:339:29] reg [26:0] tlb_sectored_entries_0_4_tag_vpn; // @[TLB.scala:339:29] reg tlb__3; // @[TLB.scala:339:29] reg [41:0] tlb_sectored_entries_0_4_data_0; // @[TLB.scala:339:29] reg [41:0] tlb_sectored_entries_0_4_data_1; // @[TLB.scala:339:29] reg [41:0] tlb_sectored_entries_0_4_data_2; // @[TLB.scala:339:29] reg [41:0] tlb_sectored_entries_0_4_data_3; // @[TLB.scala:339:29] reg tlb_sectored_entries_0_4_valid_0; // @[TLB.scala:339:29] reg tlb_sectored_entries_0_4_valid_1; // @[TLB.scala:339:29] reg tlb_sectored_entries_0_4_valid_2; // @[TLB.scala:339:29] reg tlb_sectored_entries_0_4_valid_3; // @[TLB.scala:339:29] reg [26:0] tlb_sectored_entries_0_5_tag_vpn; // @[TLB.scala:339:29] reg tlb__4; // @[TLB.scala:339:29] reg [41:0] tlb_sectored_entries_0_5_data_0; // @[TLB.scala:339:29] reg [41:0] tlb_sectored_entries_0_5_data_1; // @[TLB.scala:339:29] reg [41:0] tlb_sectored_entries_0_5_data_2; // @[TLB.scala:339:29] reg [41:0] tlb_sectored_entries_0_5_data_3; // @[TLB.scala:339:29] reg tlb_sectored_entries_0_5_valid_0; // @[TLB.scala:339:29] reg tlb_sectored_entries_0_5_valid_1; // @[TLB.scala:339:29] reg tlb_sectored_entries_0_5_valid_2; // @[TLB.scala:339:29] reg tlb_sectored_entries_0_5_valid_3; // @[TLB.scala:339:29] reg [26:0] tlb_sectored_entries_0_6_tag_vpn; // @[TLB.scala:339:29] reg tlb__5; // @[TLB.scala:339:29] reg [41:0] tlb_sectored_entries_0_6_data_0; // @[TLB.scala:339:29] reg [41:0] tlb_sectored_entries_0_6_data_1; // @[TLB.scala:339:29] reg [41:0] tlb_sectored_entries_0_6_data_2; // @[TLB.scala:339:29] reg [41:0] tlb_sectored_entries_0_6_data_3; // @[TLB.scala:339:29] reg tlb_sectored_entries_0_6_valid_0; // @[TLB.scala:339:29] reg tlb_sectored_entries_0_6_valid_1; // @[TLB.scala:339:29] reg tlb_sectored_entries_0_6_valid_2; // @[TLB.scala:339:29] reg tlb_sectored_entries_0_6_valid_3; // @[TLB.scala:339:29] reg [26:0] tlb_sectored_entries_0_7_tag_vpn; // @[TLB.scala:339:29] reg tlb__6; // @[TLB.scala:339:29] reg [41:0] tlb_sectored_entries_0_7_data_0; // @[TLB.scala:339:29] reg [41:0] tlb_sectored_entries_0_7_data_1; // @[TLB.scala:339:29] reg [41:0] tlb_sectored_entries_0_7_data_2; // @[TLB.scala:339:29] reg [41:0] tlb_sectored_entries_0_7_data_3; // @[TLB.scala:339:29] reg tlb_sectored_entries_0_7_valid_0; // @[TLB.scala:339:29] reg tlb_sectored_entries_0_7_valid_1; // @[TLB.scala:339:29] reg tlb_sectored_entries_0_7_valid_2; // @[TLB.scala:339:29] reg 
tlb_sectored_entries_0_7_valid_3; // @[TLB.scala:339:29] reg [1:0] tlb_superpage_entries_0_level; // @[TLB.scala:341:30] reg [26:0] tlb_superpage_entries_0_tag_vpn; // @[TLB.scala:341:30] reg tlb__7; // @[TLB.scala:341:30] reg [41:0] tlb_superpage_entries_0_data_0; // @[TLB.scala:341:30] reg tlb_superpage_entries_0_valid_0; // @[TLB.scala:341:30] reg [1:0] tlb_superpage_entries_1_level; // @[TLB.scala:341:30] reg [26:0] tlb_superpage_entries_1_tag_vpn; // @[TLB.scala:341:30] reg tlb__8; // @[TLB.scala:341:30] reg [41:0] tlb_superpage_entries_1_data_0; // @[TLB.scala:341:30] reg tlb_superpage_entries_1_valid_0; // @[TLB.scala:341:30] reg [1:0] tlb_superpage_entries_2_level; // @[TLB.scala:341:30] reg [26:0] tlb_superpage_entries_2_tag_vpn; // @[TLB.scala:341:30] reg tlb__9; // @[TLB.scala:341:30] reg [41:0] tlb_superpage_entries_2_data_0; // @[TLB.scala:341:30] reg tlb_superpage_entries_2_valid_0; // @[TLB.scala:341:30] reg [1:0] tlb_superpage_entries_3_level; // @[TLB.scala:341:30] reg [26:0] tlb_superpage_entries_3_tag_vpn; // @[TLB.scala:341:30] reg tlb__10; // @[TLB.scala:341:30] reg [41:0] tlb_superpage_entries_3_data_0; // @[TLB.scala:341:30] reg tlb_superpage_entries_3_valid_0; // @[TLB.scala:341:30] reg [1:0] tlb_special_entry_level; // @[TLB.scala:346:56] reg [26:0] tlb_special_entry_tag_vpn; // @[TLB.scala:346:56] reg [41:0] tlb_special_entry_data_0; // @[TLB.scala:346:56] reg tlb_special_entry_valid_0; // @[TLB.scala:346:56] reg [1:0] tlb_state; // @[TLB.scala:352:22] reg [26:0] tlb_r_refill_tag; // @[TLB.scala:354:25] reg [1:0] tlb_waddr; // @[TLB.scala:355:34] reg [2:0] tlb_r_sectored_repl_addr; // @[TLB.scala:356:33] reg tlb_r_sectored_hit_valid; // @[TLB.scala:357:27] reg [2:0] tlb_r_sectored_hit_bits; // @[TLB.scala:357:27] reg tlb_r_need_gpa; // @[TLB.scala:361:23] wire tlb_vm_enabled = io_ptw_ptbr_mode[3] & ~(s1_tlb_req_prv[1]) & ~s1_tlb_req_passthrough; // @[TLB.scala:372:27, :374:41, :399:{45,61,64}] wire tlb__11 = tlb_state == 2'h1; // @[package.scala:16:47] wire tlb_ignore_13 = tlb_special_entry_level == 2'h0; // @[TLB.scala:197:28, :346:56] wire [27:0] tlb_mpu_ppn = io_ptw_resp_valid ? {8'h0, io_ptw_resp_bits_pte_ppn[19:0]} : tlb_vm_enabled ? {8'h0, _tlb_mpu_ppn_barrier_io_y_ppn[19:18], (tlb_ignore_13 ? s1_tlb_req_vaddr[29:21] : 9'h0) | _tlb_mpu_ppn_barrier_io_y_ppn[17:9], (tlb_special_entry_level[1] ? 9'h0 : s1_tlb_req_vaddr[20:12]) | _tlb_mpu_ppn_barrier_io_y_ppn[8:0]} : s1_tlb_req_vaddr[39:12]; // @[package.scala:267:25] wire [2:0] tlb_mpu_priv = io_ptw_resp_valid | s1_tlb_req_passthrough ? 
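// mpu_priv: privilege used for the memory-protection checks; PTW refills and passthrough requests use the fixed S-mode encoding (3'h1), other requests use {debug bit, s1_tlb_req_prv}.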
3'h1 : {io_ptw_status_debug, s1_tlb_req_prv}; // @[TLB.scala:415:{27,52,103}] wire [24:0] tlb__12 = tlb_sectored_entries_0_0_tag_vpn[26:2] ^ s1_tlb_req_vaddr[38:14]; // @[TLB.scala:174:61, :335:30, :339:29] wire [24:0] tlb__13 = tlb_sectored_entries_0_1_tag_vpn[26:2] ^ s1_tlb_req_vaddr[38:14]; // @[TLB.scala:174:61, :335:30, :339:29] wire [24:0] tlb__14 = tlb_sectored_entries_0_2_tag_vpn[26:2] ^ s1_tlb_req_vaddr[38:14]; // @[TLB.scala:174:61, :335:30, :339:29] wire [24:0] tlb__15 = tlb_sectored_entries_0_3_tag_vpn[26:2] ^ s1_tlb_req_vaddr[38:14]; // @[TLB.scala:174:61, :335:30, :339:29] wire [24:0] tlb__16 = tlb_sectored_entries_0_4_tag_vpn[26:2] ^ s1_tlb_req_vaddr[38:14]; // @[TLB.scala:174:61, :335:30, :339:29] wire [24:0] tlb__17 = tlb_sectored_entries_0_5_tag_vpn[26:2] ^ s1_tlb_req_vaddr[38:14]; // @[TLB.scala:174:61, :335:30, :339:29] wire [24:0] tlb__18 = tlb_sectored_entries_0_6_tag_vpn[26:2] ^ s1_tlb_req_vaddr[38:14]; // @[TLB.scala:174:61, :335:30, :339:29] wire [24:0] tlb__19 = tlb_sectored_entries_0_7_tag_vpn[26:2] ^ s1_tlb_req_vaddr[38:14]; // @[TLB.scala:174:61, :335:30, :339:29] wire [17:0] tlb__20 = tlb_superpage_entries_0_tag_vpn[26:9] ^ s1_tlb_req_vaddr[38:21]; // @[TLB.scala:183:52, :335:30, :341:30] wire tlb_ignore_1 = tlb_superpage_entries_0_level == 2'h0; // @[TLB.scala:182:28, :341:30] wire [17:0] tlb__21 = tlb_superpage_entries_1_tag_vpn[26:9] ^ s1_tlb_req_vaddr[38:21]; // @[TLB.scala:183:52, :335:30, :341:30] wire tlb_ignore_4 = tlb_superpage_entries_1_level == 2'h0; // @[TLB.scala:182:28, :341:30] wire [17:0] tlb__22 = tlb_superpage_entries_2_tag_vpn[26:9] ^ s1_tlb_req_vaddr[38:21]; // @[TLB.scala:183:52, :335:30, :341:30] wire tlb_ignore_7 = tlb_superpage_entries_2_level == 2'h0; // @[TLB.scala:182:28, :341:30] wire [17:0] tlb__23 = tlb_superpage_entries_3_tag_vpn[26:9] ^ s1_tlb_req_vaddr[38:21]; // @[TLB.scala:183:52, :335:30, :341:30] wire tlb_ignore_10 = tlb_superpage_entries_3_level == 2'h0; // @[TLB.scala:182:28, :341:30] wire [3:0] _GEN = {{tlb_sectored_entries_0_0_valid_3}, {tlb_sectored_entries_0_0_valid_2}, {tlb_sectored_entries_0_0_valid_1}, {tlb_sectored_entries_0_0_valid_0}}; // @[TLB.scala:188:18, :339:29] wire tlb_hitsVec_0 = tlb_vm_enabled & _GEN[s1_tlb_req_vaddr[13:12]] & tlb__12 == 25'h0 & ~tlb_; // @[package.scala:163:13] wire [3:0] _GEN_0 = {{tlb_sectored_entries_0_1_valid_3}, {tlb_sectored_entries_0_1_valid_2}, {tlb_sectored_entries_0_1_valid_1}, {tlb_sectored_entries_0_1_valid_0}}; // @[TLB.scala:188:18, :339:29] wire tlb_hitsVec_1 = tlb_vm_enabled & _GEN_0[s1_tlb_req_vaddr[13:12]] & tlb__13 == 25'h0 & ~tlb__0; // @[package.scala:163:13] wire [3:0] _GEN_1 = {{tlb_sectored_entries_0_2_valid_3}, {tlb_sectored_entries_0_2_valid_2}, {tlb_sectored_entries_0_2_valid_1}, {tlb_sectored_entries_0_2_valid_0}}; // @[TLB.scala:188:18, :339:29] wire tlb_hitsVec_2 = tlb_vm_enabled & _GEN_1[s1_tlb_req_vaddr[13:12]] & tlb__14 == 25'h0 & ~tlb__1; // @[package.scala:163:13] wire [3:0] _GEN_2 = {{tlb_sectored_entries_0_3_valid_3}, {tlb_sectored_entries_0_3_valid_2}, {tlb_sectored_entries_0_3_valid_1}, {tlb_sectored_entries_0_3_valid_0}}; // @[TLB.scala:188:18, :339:29] wire tlb_hitsVec_3 = tlb_vm_enabled & _GEN_2[s1_tlb_req_vaddr[13:12]] & tlb__15 == 25'h0 & ~tlb__2; // @[package.scala:163:13] wire [3:0] _GEN_3 = {{tlb_sectored_entries_0_4_valid_3}, {tlb_sectored_entries_0_4_valid_2}, {tlb_sectored_entries_0_4_valid_1}, {tlb_sectored_entries_0_4_valid_0}}; // @[TLB.scala:188:18, :339:29] wire tlb_hitsVec_4 = tlb_vm_enabled & _GEN_3[s1_tlb_req_vaddr[13:12]] & 
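// Sectored-entry hit: translation enabled, the sector selected by s1_tlb_req_vaddr[13:12] is valid, and the upper VPN bits XOR to zero against the entry tag.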
tlb__16 == 25'h0 & ~tlb__3; // @[package.scala:163:13] wire [3:0] _GEN_4 = {{tlb_sectored_entries_0_5_valid_3}, {tlb_sectored_entries_0_5_valid_2}, {tlb_sectored_entries_0_5_valid_1}, {tlb_sectored_entries_0_5_valid_0}}; // @[TLB.scala:188:18, :339:29] wire tlb_hitsVec_5 = tlb_vm_enabled & _GEN_4[s1_tlb_req_vaddr[13:12]] & tlb__17 == 25'h0 & ~tlb__4; // @[package.scala:163:13] wire [3:0] _GEN_5 = {{tlb_sectored_entries_0_6_valid_3}, {tlb_sectored_entries_0_6_valid_2}, {tlb_sectored_entries_0_6_valid_1}, {tlb_sectored_entries_0_6_valid_0}}; // @[TLB.scala:188:18, :339:29] wire tlb_hitsVec_6 = tlb_vm_enabled & _GEN_5[s1_tlb_req_vaddr[13:12]] & tlb__18 == 25'h0 & ~tlb__5; // @[package.scala:163:13] wire [3:0] _GEN_6 = {{tlb_sectored_entries_0_7_valid_3}, {tlb_sectored_entries_0_7_valid_2}, {tlb_sectored_entries_0_7_valid_1}, {tlb_sectored_entries_0_7_valid_0}}; // @[TLB.scala:188:18, :339:29] wire tlb_hitsVec_7 = tlb_vm_enabled & _GEN_6[s1_tlb_req_vaddr[13:12]] & tlb__19 == 25'h0 & ~tlb__6; // @[package.scala:163:13] wire tlb_hitsVec_8 = tlb_vm_enabled & tlb_superpage_entries_0_valid_0 & ~tlb__7 & tlb__20[17:9] == 9'h0 & (tlb_ignore_1 | tlb__20[8:0] == 9'h0); // @[TLB.scala:178:{33,43}, :182:28, :183:{29,40,52,58,79}, :341:30, :399:{45,61}, :440:44] wire tlb_hitsVec_9 = tlb_vm_enabled & tlb_superpage_entries_1_valid_0 & ~tlb__8 & tlb__21[17:9] == 9'h0 & (tlb_ignore_4 | tlb__21[8:0] == 9'h0); // @[TLB.scala:178:{33,43}, :182:28, :183:{29,40,52,58,79}, :341:30, :399:{45,61}, :440:44] wire tlb_hitsVec_10 = tlb_vm_enabled & tlb_superpage_entries_2_valid_0 & ~tlb__9 & tlb__22[17:9] == 9'h0 & (tlb_ignore_7 | tlb__22[8:0] == 9'h0); // @[TLB.scala:178:{33,43}, :182:28, :183:{29,40,52,58,79}, :341:30, :399:{45,61}, :440:44] wire tlb_hitsVec_11 = tlb_vm_enabled & tlb_superpage_entries_3_valid_0 & ~tlb__10 & tlb__23[17:9] == 9'h0 & (tlb_ignore_10 | tlb__23[8:0] == 9'h0); // @[TLB.scala:178:{33,43}, :182:28, :183:{29,40,52,58,79}, :341:30, :399:{45,61}, :440:44] wire [26:0] tlb__24 = tlb_special_entry_tag_vpn ^ s1_tlb_req_vaddr[38:12]; // @[TLB.scala:183:52, :335:30, :346:56] wire tlb_hitsVec_12 = tlb_vm_enabled & tlb_special_entry_valid_0 & tlb__24[26:18] == 9'h0 & (tlb_ignore_13 | tlb__24[17:9] == 9'h0) & (~(tlb_special_entry_level[1]) | tlb__24[8:0] == 9'h0); // @[TLB.scala:183:{29,40,52,58,79}, :197:28, :346:56, :399:{45,61}, :440:44] wire [12:0] tlb_real_hits = {tlb_hitsVec_12, tlb_hitsVec_11, tlb_hitsVec_10, tlb_hitsVec_9, tlb_hitsVec_8, tlb_hitsVec_7, tlb_hitsVec_6, tlb_hitsVec_5, tlb_hitsVec_4, tlb_hitsVec_3, tlb_hitsVec_2, tlb_hitsVec_1, tlb_hitsVec_0}; // @[package.scala:45:27] wire [3:0][41:0] _GEN_7 = {{tlb_sectored_entries_0_0_data_3}, {tlb_sectored_entries_0_0_data_2}, {tlb_sectored_entries_0_0_data_1}, {tlb_sectored_entries_0_0_data_0}}; // @[TLB.scala:170:77, :339:29] wire [41:0] tlb__entries_WIRE_1 = _GEN_7[s1_tlb_req_vaddr[13:12]]; // @[package.scala:163:13] wire [3:0][41:0] _GEN_8 = {{tlb_sectored_entries_0_1_data_3}, {tlb_sectored_entries_0_1_data_2}, {tlb_sectored_entries_0_1_data_1}, {tlb_sectored_entries_0_1_data_0}}; // @[TLB.scala:170:77, :339:29] wire [41:0] tlb__entries_WIRE_3 = _GEN_8[s1_tlb_req_vaddr[13:12]]; // @[package.scala:163:13] wire [3:0][41:0] _GEN_9 = {{tlb_sectored_entries_0_2_data_3}, {tlb_sectored_entries_0_2_data_2}, {tlb_sectored_entries_0_2_data_1}, {tlb_sectored_entries_0_2_data_0}}; // @[TLB.scala:170:77, :339:29] wire [41:0] tlb__entries_WIRE_5 = _GEN_9[s1_tlb_req_vaddr[13:12]]; // @[package.scala:163:13] wire [3:0][41:0] _GEN_10 = 
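// _GEN_7.._GEN_14 gather each entry's four sector data words so s1_tlb_req_vaddr[13:12] can select the accessed sector's 42-bit entry data.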
{{tlb_sectored_entries_0_3_data_3}, {tlb_sectored_entries_0_3_data_2}, {tlb_sectored_entries_0_3_data_1}, {tlb_sectored_entries_0_3_data_0}}; // @[TLB.scala:170:77, :339:29] wire [41:0] tlb__entries_WIRE_7 = _GEN_10[s1_tlb_req_vaddr[13:12]]; // @[package.scala:163:13] wire [3:0][41:0] _GEN_11 = {{tlb_sectored_entries_0_4_data_3}, {tlb_sectored_entries_0_4_data_2}, {tlb_sectored_entries_0_4_data_1}, {tlb_sectored_entries_0_4_data_0}}; // @[TLB.scala:170:77, :339:29] wire [41:0] tlb__entries_WIRE_9 = _GEN_11[s1_tlb_req_vaddr[13:12]]; // @[package.scala:163:13] wire [3:0][41:0] _GEN_12 = {{tlb_sectored_entries_0_5_data_3}, {tlb_sectored_entries_0_5_data_2}, {tlb_sectored_entries_0_5_data_1}, {tlb_sectored_entries_0_5_data_0}}; // @[TLB.scala:170:77, :339:29] wire [41:0] tlb__entries_WIRE_11 = _GEN_12[s1_tlb_req_vaddr[13:12]]; // @[package.scala:163:13] wire [3:0][41:0] _GEN_13 = {{tlb_sectored_entries_0_6_data_3}, {tlb_sectored_entries_0_6_data_2}, {tlb_sectored_entries_0_6_data_1}, {tlb_sectored_entries_0_6_data_0}}; // @[TLB.scala:170:77, :339:29] wire [41:0] tlb__entries_WIRE_13 = _GEN_13[s1_tlb_req_vaddr[13:12]]; // @[package.scala:163:13] wire [3:0][41:0] _GEN_14 = {{tlb_sectored_entries_0_7_data_3}, {tlb_sectored_entries_0_7_data_2}, {tlb_sectored_entries_0_7_data_1}, {tlb_sectored_entries_0_7_data_0}}; // @[TLB.scala:170:77, :339:29] wire [41:0] tlb__entries_WIRE_15 = _GEN_14[s1_tlb_req_vaddr[13:12]]; // @[package.scala:163:13] wire [19:0] tlb_ppn = (tlb_hitsVec_0 ? _tlb_entries_barrier_io_y_ppn : 20'h0) | (tlb_hitsVec_1 ? _tlb_entries_barrier_1_io_y_ppn : 20'h0) | (tlb_hitsVec_2 ? _tlb_entries_barrier_2_io_y_ppn : 20'h0) | (tlb_hitsVec_3 ? _tlb_entries_barrier_3_io_y_ppn : 20'h0) | (tlb_hitsVec_4 ? _tlb_entries_barrier_4_io_y_ppn : 20'h0) | (tlb_hitsVec_5 ? _tlb_entries_barrier_5_io_y_ppn : 20'h0) | (tlb_hitsVec_6 ? _tlb_entries_barrier_6_io_y_ppn : 20'h0) | (tlb_hitsVec_7 ? _tlb_entries_barrier_7_io_y_ppn : 20'h0) | (tlb_hitsVec_8 ? {_tlb_entries_barrier_8_io_y_ppn[19:18], (tlb_ignore_1 ? s1_tlb_req_vaddr[29:21] : 9'h0) | _tlb_entries_barrier_8_io_y_ppn[17:9], s1_tlb_req_vaddr[20:12] | _tlb_entries_barrier_8_io_y_ppn[8:0]} : 20'h0) | (tlb_hitsVec_9 ? {_tlb_entries_barrier_9_io_y_ppn[19:18], (tlb_ignore_4 ? s1_tlb_req_vaddr[29:21] : 9'h0) | _tlb_entries_barrier_9_io_y_ppn[17:9], s1_tlb_req_vaddr[20:12] | _tlb_entries_barrier_9_io_y_ppn[8:0]} : 20'h0) | (tlb_hitsVec_10 ? {_tlb_entries_barrier_10_io_y_ppn[19:18], (tlb_ignore_7 ? s1_tlb_req_vaddr[29:21] : 9'h0) | _tlb_entries_barrier_10_io_y_ppn[17:9], s1_tlb_req_vaddr[20:12] | _tlb_entries_barrier_10_io_y_ppn[8:0]} : 20'h0) | (tlb_hitsVec_11 ? {_tlb_entries_barrier_11_io_y_ppn[19:18], (tlb_ignore_10 ? s1_tlb_req_vaddr[29:21] : 9'h0) | _tlb_entries_barrier_11_io_y_ppn[17:9], s1_tlb_req_vaddr[20:12] | _tlb_entries_barrier_11_io_y_ppn[8:0]} : 20'h0) | (tlb_hitsVec_12 ? {_tlb_entries_barrier_12_io_y_ppn[19:18], (tlb_ignore_13 ? s1_tlb_req_vaddr[29:21] : 9'h0) | _tlb_entries_barrier_12_io_y_ppn[17:9], (tlb_special_entry_level[1] ? 9'h0 : s1_tlb_req_vaddr[20:12]) | _tlb_entries_barrier_12_io_y_ppn[8:0]} : 20'h0) | (tlb_vm_enabled ? 
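// Final term of the one-hot PPN mux: with translation disabled, the page number comes straight from the virtual address.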
20'h0 : s1_tlb_req_vaddr[31:12]); // @[Mux.scala:30:73] wire tlb_bad_va = tlb_vm_enabled & ~(s1_tlb_req_vaddr[39:38] == 2'h0 | (&(s1_tlb_req_vaddr[39:38]))); // @[TLB.scala:399:{45,61}, :559:43, :560:{37,51,59,86}, :568:34] wire tlb_tlb_miss = tlb_vm_enabled & ~tlb_bad_va & tlb_real_hits == 13'h0; // @[package.scala:45:27] reg [6:0] tlb_state_vec_0; // @[Replacement.scala:305:17] reg [2:0] tlb_state_reg_1; // @[Replacement.scala:168:70] wire tlb_multipleHits_rightOne_1 = tlb_hitsVec_1 | tlb_hitsVec_2; // @[Misc.scala:183:16] wire tlb_multipleHits_leftOne_2 = tlb_hitsVec_0 | tlb_multipleHits_rightOne_1; // @[Misc.scala:183:16] wire tlb_multipleHits_rightOne_3 = tlb_hitsVec_4 | tlb_hitsVec_5; // @[Misc.scala:183:16] wire tlb_multipleHits_rightOne_4 = tlb_hitsVec_3 | tlb_multipleHits_rightOne_3; // @[Misc.scala:183:16] wire tlb_multipleHits_rightOne_6 = tlb_hitsVec_7 | tlb_hitsVec_8; // @[Misc.scala:183:16] wire tlb_multipleHits_leftOne_8 = tlb_hitsVec_6 | tlb_multipleHits_rightOne_6; // @[Misc.scala:183:16] wire tlb_multipleHits_leftOne_10 = tlb_hitsVec_9 | tlb_hitsVec_10; // @[Misc.scala:183:16] wire tlb_multipleHits_rightOne_9 = tlb_hitsVec_11 | tlb_hitsVec_12; // @[Misc.scala:183:16] wire tlb_multipleHits_rightOne_10 = tlb_multipleHits_leftOne_10 | tlb_multipleHits_rightOne_9; // @[Misc.scala:183:16] wire tlb_multipleHits = tlb_hitsVec_1 & tlb_hitsVec_2 | tlb_hitsVec_0 & tlb_multipleHits_rightOne_1 | tlb_hitsVec_4 & tlb_hitsVec_5 | tlb_hitsVec_3 & tlb_multipleHits_rightOne_3 | tlb_multipleHits_leftOne_2 & tlb_multipleHits_rightOne_4 | tlb_hitsVec_7 & tlb_hitsVec_8 | tlb_hitsVec_6 & tlb_multipleHits_rightOne_6 | tlb_hitsVec_9 & tlb_hitsVec_10 | tlb_hitsVec_11 & tlb_hitsVec_12 | tlb_multipleHits_leftOne_10 & tlb_multipleHits_rightOne_9 | tlb_multipleHits_leftOne_8 & tlb_multipleHits_rightOne_10 | (tlb_multipleHits_leftOne_2 | tlb_multipleHits_rightOne_4) & (tlb_multipleHits_leftOne_8 | tlb_multipleHits_rightOne_10); // @[Misc.scala:183:{16,37,49,61}] wire tlb_io_req_ready = tlb_state == 2'h0; // @[TLB.scala:352:22, :631:25] wire _GEN_15 = metaArb_io_in_2_valid | metaArb_io_in_3_valid; // @[Arbiter.scala:145:26, :147:19] wire metaArb_io_out_bits_write = resetting | metaArb_io_in_2_valid | metaArb_io_in_3_valid | metaArb_io_in_4_valid; // @[Arbiter.scala:145:26, :147:19] wire metaArb__grant_T_2 = resetting | metaArb_io_in_2_valid | metaArb_io_in_3_valid; // @[Arbiter.scala:45:68] wire metaArb__grant_T_3 = metaArb__grant_T_2 | metaArb_io_in_4_valid; // @[Arbiter.scala:45:68] wire metaArb__grant_T_5 = metaArb__grant_T_3 | metaArb_io_in_6_valid; // @[Arbiter.scala:45:68] wire metaArb_io_out_valid = metaArb__grant_T_5 | io_cpu_req_valid; // @[Arbiter.scala:45:68, :154:31] wire dataArb__grant_T = dataArb_io_in_0_valid | dataArb_io_in_1_valid; // @[Arbiter.scala:45:68] wire dataArb__grant_T_1 = dataArb__grant_T | dataArb_io_in_2_valid; // @[Arbiter.scala:45:68] wire dataArb_io_out_valid = dataArb__grant_T_1 | dataArb_io_in_3_valid; // @[Arbiter.scala:45:68, :154:31] reg s1_valid; // @[DCache.scala:182:25] reg s1_probe; // @[DCache.scala:183:25] reg [1:0] probe_bits_param; // @[DCache.scala:184:29] reg [3:0] probe_bits_size; // @[DCache.scala:184:29] reg probe_bits_source; // @[DCache.scala:184:29] reg [31:0] probe_bits_address; // @[DCache.scala:184:29] wire s1_valid_masked = s1_valid & ~io_cpu_s1_kill; // @[DCache.scala:182:25, :186:{34,37}] reg [39:0] s1_vaddr; // @[DCache.scala:196:25] reg [6:0] s1_req_tag; // @[DCache.scala:196:25] reg [4:0] s1_req_cmd; // @[DCache.scala:196:25] reg [1:0] 
s1_req_size; // @[DCache.scala:196:25] reg s1_req_signed; // @[DCache.scala:196:25] reg [1:0] s1_req_dprv; // @[DCache.scala:196:25] reg s1_req_dv; // @[DCache.scala:196:25] reg [1:0] s1_tlb_req_size; // @[DCache.scala:208:29] reg [4:0] s1_tlb_req_cmd; // @[DCache.scala:208:29] wire _io_cpu_perf_canAcceptLoadThenLoad_T_1 = s1_req_cmd == 5'h0; // @[package.scala:16:47] wire _io_cpu_perf_canAcceptLoadThenLoad_T_2 = s1_req_cmd == 5'h10; // @[package.scala:16:47] wire _io_cpu_perf_canAcceptLoadThenLoad_T_3 = s1_req_cmd == 5'h6; // @[package.scala:16:47] wire _io_cpu_perf_canAcceptLoadThenLoad_T_29 = s1_req_cmd == 5'h7; // @[package.scala:16:47] wire _io_cpu_perf_canAcceptLoadThenLoad_T_31 = s1_req_cmd == 5'h4; // @[package.scala:16:47] wire _io_cpu_perf_canAcceptLoadThenLoad_T_32 = s1_req_cmd == 5'h9; // @[package.scala:16:47] wire _io_cpu_perf_canAcceptLoadThenLoad_T_33 = s1_req_cmd == 5'hA; // @[package.scala:16:47] wire _io_cpu_perf_canAcceptLoadThenLoad_T_34 = s1_req_cmd == 5'hB; // @[package.scala:16:47] wire _io_cpu_perf_canAcceptLoadThenLoad_T_38 = s1_req_cmd == 5'h8; // @[package.scala:16:47] wire _io_cpu_perf_canAcceptLoadThenLoad_T_39 = s1_req_cmd == 5'hC; // @[package.scala:16:47] wire _io_cpu_perf_canAcceptLoadThenLoad_T_40 = s1_req_cmd == 5'hD; // @[package.scala:16:47] wire _io_cpu_perf_canAcceptLoadThenLoad_T_41 = s1_req_cmd == 5'hE; // @[package.scala:16:47] wire _io_cpu_perf_canAcceptLoadThenLoad_T_42 = s1_req_cmd == 5'hF; // @[package.scala:16:47] wire s1_read = _io_cpu_perf_canAcceptLoadThenLoad_T_1 | _io_cpu_perf_canAcceptLoadThenLoad_T_2 | _io_cpu_perf_canAcceptLoadThenLoad_T_3 | _io_cpu_perf_canAcceptLoadThenLoad_T_29 | _io_cpu_perf_canAcceptLoadThenLoad_T_31 | _io_cpu_perf_canAcceptLoadThenLoad_T_32 | _io_cpu_perf_canAcceptLoadThenLoad_T_33 | _io_cpu_perf_canAcceptLoadThenLoad_T_34 | _io_cpu_perf_canAcceptLoadThenLoad_T_38 | _io_cpu_perf_canAcceptLoadThenLoad_T_39 | _io_cpu_perf_canAcceptLoadThenLoad_T_40 | _io_cpu_perf_canAcceptLoadThenLoad_T_41 | _io_cpu_perf_canAcceptLoadThenLoad_T_42; // @[package.scala:16:47, :81:59] wire _io_cpu_perf_canAcceptLoadThenLoad_T_26 = s1_req_cmd == 5'h1; // @[DCache.scala:196:25] wire _io_cpu_perf_canAcceptLoadThenLoad_T_51 = s1_req_cmd == 5'h11; // @[DCache.scala:196:25] wire s1_write = _io_cpu_perf_canAcceptLoadThenLoad_T_26 | _io_cpu_perf_canAcceptLoadThenLoad_T_51 | _io_cpu_perf_canAcceptLoadThenLoad_T_29 | _io_cpu_perf_canAcceptLoadThenLoad_T_31 | _io_cpu_perf_canAcceptLoadThenLoad_T_32 | _io_cpu_perf_canAcceptLoadThenLoad_T_33 | _io_cpu_perf_canAcceptLoadThenLoad_T_34 | _io_cpu_perf_canAcceptLoadThenLoad_T_38 | _io_cpu_perf_canAcceptLoadThenLoad_T_39 | _io_cpu_perf_canAcceptLoadThenLoad_T_40 | _io_cpu_perf_canAcceptLoadThenLoad_T_41 | _io_cpu_perf_canAcceptLoadThenLoad_T_42; // @[package.scala:16:47, :81:59] wire s1_sfence = s1_req_cmd == 5'h14 | s1_req_cmd == 5'h15 | s1_req_cmd == 5'h16; // @[DCache.scala:196:25, :213:{30,43,57,71,85}] reg s1_flush_valid; // @[DCache.scala:215:27] reg cached_grant_wait; // @[DCache.scala:223:34] reg [8:0] flushCounter; // @[DCache.scala:225:29] reg release_ack_wait; // @[DCache.scala:226:33] reg [31:0] release_ack_addr; // @[DCache.scala:227:29] reg [3:0] release_state; // @[DCache.scala:228:30] wire _canAcceptCachedGrant_T = release_state == 4'h1; // @[package.scala:16:47] wire _inWriteback_T_1 = release_state == 4'h2; // @[package.scala:16:47] wire inWriteback = _canAcceptCachedGrant_T | _inWriteback_T_1; // @[package.scala:16:47, :81:59] wire _io_cpu_req_ready_T_4 = release_state == 4'h0 & 
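// _io_cpu_req_ready_T_4: a new CPU request is accepted only with no release in progress, no cached grant outstanding, and no s1 nack.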
~cached_grant_wait & ~s1_nack; // @[DCache.scala:187:41, :223:34, :228:30, :233:{38,51,54,73}, :571:36, :824:21, :839:24] reg uncachedInFlight_0; // @[DCache.scala:236:33] reg [39:0] uncachedReqs_0_addr; // @[DCache.scala:237:25] reg [6:0] uncachedReqs_0_tag; // @[DCache.scala:237:25] reg [1:0] uncachedReqs_0_size; // @[DCache.scala:237:25] reg uncachedReqs_0_signed; // @[DCache.scala:237:25] wire _pstore_drain_opportunistic_T = io_cpu_req_bits_cmd == 5'h0; // @[package.scala:16:47] wire _pstore_drain_opportunistic_T_1 = io_cpu_req_bits_cmd == 5'h10; // @[package.scala:16:47] wire _pstore_drain_opportunistic_T_2 = io_cpu_req_bits_cmd == 5'h6; // @[package.scala:16:47] wire _pstore_drain_opportunistic_T_28 = io_cpu_req_bits_cmd == 5'h7; // @[package.scala:16:47] wire _pstore_drain_opportunistic_T_30 = io_cpu_req_bits_cmd == 5'h4; // @[package.scala:16:47] wire _pstore_drain_opportunistic_T_31 = io_cpu_req_bits_cmd == 5'h9; // @[package.scala:16:47] wire _pstore_drain_opportunistic_T_32 = io_cpu_req_bits_cmd == 5'hA; // @[package.scala:16:47] wire _pstore_drain_opportunistic_T_33 = io_cpu_req_bits_cmd == 5'hB; // @[package.scala:16:47] wire _pstore_drain_opportunistic_T_37 = io_cpu_req_bits_cmd == 5'h8; // @[package.scala:16:47] wire _pstore_drain_opportunistic_T_38 = io_cpu_req_bits_cmd == 5'hC; // @[package.scala:16:47] wire _pstore_drain_opportunistic_T_39 = io_cpu_req_bits_cmd == 5'hD; // @[package.scala:16:47] wire _pstore_drain_opportunistic_T_40 = io_cpu_req_bits_cmd == 5'hE; // @[package.scala:16:47] wire _pstore_drain_opportunistic_T_41 = io_cpu_req_bits_cmd == 5'hF; // @[package.scala:16:47] wire _pstore_drain_opportunistic_T_25 = io_cpu_req_bits_cmd == 5'h1; // @[package.scala:16:47] wire _pstore_drain_opportunistic_res_T_1 = io_cpu_req_bits_cmd == 5'h3; // @[package.scala:16:47] wire _dataArb_io_in_3_valid_res_T_2 = _pstore_drain_opportunistic_T_25 | _pstore_drain_opportunistic_res_T_1; // @[package.scala:16:47, :81:59] wire _pstore_drain_opportunistic_T_50 = io_cpu_req_bits_cmd == 5'h11; // @[Consts.scala:90:49] assign dataArb_io_in_3_valid = io_cpu_req_valid & ~_dataArb_io_in_3_valid_res_T_2; // @[package.scala:81:59] assign dataArb_io_in_3_bits_addr = io_cpu_req_bits_addr[11:0]; // @[DCache.scala:245:30] wire _GEN_16 = dataArb__grant_T_1 & (_pstore_drain_opportunistic_T | _pstore_drain_opportunistic_T_1 | _pstore_drain_opportunistic_T_2 | _pstore_drain_opportunistic_T_28 | _pstore_drain_opportunistic_T_30 | _pstore_drain_opportunistic_T_31 | _pstore_drain_opportunistic_T_32 | _pstore_drain_opportunistic_T_33 | _pstore_drain_opportunistic_T_37 | _pstore_drain_opportunistic_T_38 | _pstore_drain_opportunistic_T_39 | _pstore_drain_opportunistic_T_40 | _pstore_drain_opportunistic_T_41); // @[Arbiter.scala:45:68] reg s1_did_read; // @[DCache.scala:259:30] assign metaArb_io_in_7_bits_idx = io_cpu_req_bits_addr[11:6]; // @[DCache.scala:263:58] wire s1_cmd_uses_tlb = s1_read | s1_write | s1_req_cmd == 5'h5 & s1_req_size[0] | s1_req_cmd == 5'h17; // @[package.scala:81:59] wire tlb_io_req_valid = s1_valid & ~io_cpu_s1_kill & s1_cmd_uses_tlb; // @[DCache.scala:182:25, :186:37, :212:30, :270:{38,55}, :273:{52,71}] wire _GEN_17 = ~tlb_io_req_ready & ~io_ptw_resp_valid & ~io_cpu_req_bits_phys; // @[TLB.scala:631:25] wire _GEN_18 = s1_valid & s1_cmd_uses_tlb & (io_ptw_resp_valid | tlb_tlb_miss | tlb_multipleHits); // @[Misc.scala:183:{37,49}] wire tlb_io_sfence_valid = s1_valid & ~io_cpu_s1_kill & s1_sfence; // @[DCache.scala:182:25, :186:37, :213:{43,71}, :278:{35,54}] assign 
rockettile_dcache_tag_array_MPORT_en = metaArb_io_out_valid & metaArb_io_out_bits_write; // @[Arbiter.scala:145:26, :147:19, :154:31] assign rockettile_dcache_tag_array_s1_meta_en = metaArb_io_out_valid & ~metaArb_io_out_bits_write; // @[Arbiter.scala:145:26, :147:19, :154:31] wire _s1_meta_hit_state_T = _rockettile_dcache_tag_array_RW0_rdata[19:0] == tlb_ppn; // @[Mux.scala:30:73] wire _s1_meta_hit_state_T_4 = _rockettile_dcache_tag_array_RW0_rdata[41:22] == tlb_ppn; // @[Mux.scala:30:73] wire _s1_meta_hit_state_T_8 = _rockettile_dcache_tag_array_RW0_rdata[63:44] == tlb_ppn; // @[Mux.scala:30:73] wire _s1_meta_hit_state_T_12 = _rockettile_dcache_tag_array_RW0_rdata[85:66] == tlb_ppn; // @[Mux.scala:30:73] wire _s1_meta_hit_state_T_16 = _rockettile_dcache_tag_array_RW0_rdata[107:88] == tlb_ppn; // @[Mux.scala:30:73] wire _s1_meta_hit_state_T_20 = _rockettile_dcache_tag_array_RW0_rdata[129:110] == tlb_ppn; // @[Mux.scala:30:73] wire _s1_meta_hit_state_T_24 = _rockettile_dcache_tag_array_RW0_rdata[151:132] == tlb_ppn; // @[Mux.scala:30:73] wire _s1_meta_hit_state_T_28 = _rockettile_dcache_tag_array_RW0_rdata[173:154] == tlb_ppn; // @[Mux.scala:30:73] wire [7:0] s1_hit_way = {(|(_rockettile_dcache_tag_array_RW0_rdata[175:174])) & _s1_meta_hit_state_T_28, (|(_rockettile_dcache_tag_array_RW0_rdata[153:152])) & _s1_meta_hit_state_T_24, (|(_rockettile_dcache_tag_array_RW0_rdata[131:130])) & _s1_meta_hit_state_T_20, (|(_rockettile_dcache_tag_array_RW0_rdata[109:108])) & _s1_meta_hit_state_T_16, (|(_rockettile_dcache_tag_array_RW0_rdata[87:86])) & _s1_meta_hit_state_T_12, (|(_rockettile_dcache_tag_array_RW0_rdata[65:64])) & _s1_meta_hit_state_T_8, (|(_rockettile_dcache_tag_array_RW0_rdata[43:42])) & _s1_meta_hit_state_T_4, (|(_rockettile_dcache_tag_array_RW0_rdata[21:20])) & _s1_meta_hit_state_T}; // @[package.scala:45:27] wire [1:0] _s1_mask_xwr_T = {s1_vaddr[0] | (|s1_req_size), ~(s1_vaddr[0])}; // @[DCache.scala:196:25] wire [3:0] _s1_mask_xwr_T_1 = {(s1_vaddr[1] ? _s1_mask_xwr_T : 2'h0) | {2{s1_req_size[1]}}, s1_vaddr[1] ? 2'h0 : _s1_mask_xwr_T}; // @[DCache.scala:196:25] wire [7:0] s1_mask_xwr = {(s1_vaddr[2] ? _s1_mask_xwr_T_1 : 4'h0) | {4{&s1_req_size}}, s1_vaddr[2] ? 
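// s1_mask_xwr: per-byte store mask expanded from the request size and the low address bits of s1_vaddr.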
4'h0 : _s1_mask_xwr_T_1}; // @[DCache.scala:196:25] reg s2_valid; // @[DCache.scala:331:25] wire s2_valid_no_xcpt = s2_valid & {io_cpu_s2_xcpt_ma_ld_0, io_cpu_s2_xcpt_ma_st_0, io_cpu_s2_xcpt_pf_ld_0, io_cpu_s2_xcpt_pf_st_0, io_cpu_s2_xcpt_ae_ld_0, io_cpu_s2_xcpt_ae_st_0} == 6'h0; // @[DCache.scala:331:25, :332:{35,54,61}, :933:24] reg s2_probe; // @[DCache.scala:333:25] wire [4:0] _GEN_19 = {s1_probe | s2_probe, release_state}; // @[DCache.scala:183:25, :228:30, :333:25, :334:{34,46,63}] reg s2_not_nacked_in_s1; // @[DCache.scala:335:36] wire s2_valid_masked = s2_valid_no_xcpt & s2_not_nacked_in_s1; // @[DCache.scala:332:35, :335:36, :337:42] reg [6:0] s2_req_tag; // @[DCache.scala:339:19] reg [4:0] s2_req_cmd; // @[DCache.scala:339:19] reg [1:0] s2_req_size; // @[DCache.scala:339:19] reg s2_req_signed; // @[DCache.scala:339:19] reg [1:0] s2_req_dprv; // @[DCache.scala:339:19] reg s2_req_dv; // @[DCache.scala:339:19] reg s2_tlb_xcpt_pf_ld; // @[DCache.scala:342:24] reg s2_tlb_xcpt_pf_st; // @[DCache.scala:342:24] reg s2_tlb_xcpt_ae_ld; // @[DCache.scala:342:24] reg s2_tlb_xcpt_ae_st; // @[DCache.scala:342:24] reg s2_tlb_xcpt_ma_ld; // @[DCache.scala:342:24] reg s2_tlb_xcpt_ma_st; // @[DCache.scala:342:24] reg s2_pma_cacheable; // @[DCache.scala:343:19] reg [39:0] s2_uncached_resp_addr; // @[DCache.scala:344:34] reg [39:0] s2_vaddr_r; // @[DCache.scala:351:31] wire s2_lr = s2_req_cmd == 5'h6; // @[package.scala:16:47] wire s2_sc = s2_req_cmd == 5'h7; // @[package.scala:16:47] wire _io_cpu_store_pending_T_5 = s2_req_cmd == 5'h4; // @[package.scala:16:47] wire _io_cpu_store_pending_T_6 = s2_req_cmd == 5'h9; // @[package.scala:16:47] wire _io_cpu_store_pending_T_7 = s2_req_cmd == 5'hA; // @[package.scala:16:47] wire _io_cpu_store_pending_T_8 = s2_req_cmd == 5'hB; // @[package.scala:16:47] wire _io_cpu_store_pending_T_12 = s2_req_cmd == 5'h8; // @[package.scala:16:47] wire _io_cpu_store_pending_T_13 = s2_req_cmd == 5'hC; // @[package.scala:16:47] wire _io_cpu_store_pending_T_14 = s2_req_cmd == 5'hD; // @[package.scala:16:47] wire _io_cpu_store_pending_T_15 = s2_req_cmd == 5'hE; // @[package.scala:16:47] wire _io_cpu_store_pending_T_16 = s2_req_cmd == 5'hF; // @[package.scala:16:47] wire s2_read = s2_req_cmd == 5'h0 | s2_req_cmd == 5'h10 | s2_lr | s2_sc | _io_cpu_store_pending_T_5 | _io_cpu_store_pending_T_6 | _io_cpu_store_pending_T_7 | _io_cpu_store_pending_T_8 | _io_cpu_store_pending_T_12 | _io_cpu_store_pending_T_13 | _io_cpu_store_pending_T_14 | _io_cpu_store_pending_T_15 | _io_cpu_store_pending_T_16; // @[package.scala:16:47, :81:59] wire _io_cpu_store_pending_T = s2_req_cmd == 5'h1; // @[DCache.scala:339:19] wire _io_cpu_store_pending_T_1 = s2_req_cmd == 5'h11; // @[DCache.scala:339:19] wire s2_write = _io_cpu_store_pending_T | _io_cpu_store_pending_T_1 | s2_sc | _io_cpu_store_pending_T_5 | _io_cpu_store_pending_T_6 | _io_cpu_store_pending_T_7 | _io_cpu_store_pending_T_8 | _io_cpu_store_pending_T_12 | _io_cpu_store_pending_T_13 | _io_cpu_store_pending_T_14 | _io_cpu_store_pending_T_15 | _io_cpu_store_pending_T_16; // @[package.scala:16:47, :81:59] wire s2_readwrite = s2_read | s2_write; // @[package.scala:81:59] reg s2_flush_valid_pre_tag_ecc; // @[DCache.scala:355:43] reg [21:0] s2_meta_corrected_r; // @[DCache.scala:361:61] reg [21:0] s2_meta_corrected_r_1; // @[DCache.scala:361:61] reg [21:0] s2_meta_corrected_r_2; // @[DCache.scala:361:61] reg [21:0] s2_meta_corrected_r_3; // @[DCache.scala:361:61] reg [21:0] s2_meta_corrected_r_4; // @[DCache.scala:361:61] reg [21:0] 
s2_meta_corrected_r_5; // @[DCache.scala:361:61] reg [21:0] s2_meta_corrected_r_6; // @[DCache.scala:361:61] reg [21:0] s2_meta_corrected_r_7; // @[DCache.scala:361:61] reg [63:0] s2_data; // @[DCache.scala:379:18] reg [7:0] s2_probe_way; // @[DCache.scala:383:31] reg [1:0] s2_probe_state_state; // @[DCache.scala:384:33] reg [7:0] s2_hit_way; // @[DCache.scala:385:29] reg [1:0] s2_hit_state_state; // @[DCache.scala:386:31] wire _metaArb_io_in_3_bits_data_c_cat_T_46 = s2_req_cmd == 5'h3; // @[DCache.scala:339:19] wire [3:0] _r_T = {_io_cpu_store_pending_T | _io_cpu_store_pending_T_1 | s2_sc | _io_cpu_store_pending_T_5 | _io_cpu_store_pending_T_6 | _io_cpu_store_pending_T_7 | _io_cpu_store_pending_T_8 | _io_cpu_store_pending_T_12 | _io_cpu_store_pending_T_13 | _io_cpu_store_pending_T_14 | _io_cpu_store_pending_T_15 | _io_cpu_store_pending_T_16, _io_cpu_store_pending_T | _io_cpu_store_pending_T_1 | s2_sc | _io_cpu_store_pending_T_5 | _io_cpu_store_pending_T_6 | _io_cpu_store_pending_T_7 | _io_cpu_store_pending_T_8 | _io_cpu_store_pending_T_12 | _io_cpu_store_pending_T_13 | _io_cpu_store_pending_T_14 | _io_cpu_store_pending_T_15 | _io_cpu_store_pending_T_16 | _metaArb_io_in_3_bits_data_c_cat_T_46 | s2_lr, s2_hit_state_state}; // @[package.scala:16:47, :81:59] wire [1:0] _r_T_27 = {1'h0, _r_T == 4'hC}; // @[Misc.scala:35:36, :49:20] wire s2_hit = _r_T == 4'h3 | _r_T == 4'h2 | _r_T == 4'h1 | _r_T == 4'h7 | _r_T == 4'h6 | (&_r_T) | _r_T == 4'hE; // @[Misc.scala:35:9, :49:20] wire [15:0][1:0] _GEN_20 = {{2'h3}, {2'h3}, {2'h2}, {_r_T_27}, {_r_T_27}, {_r_T_27}, {_r_T_27}, {_r_T_27}, {2'h3}, {2'h2}, {2'h2}, {2'h1}, {2'h3}, {2'h2}, {2'h1}, {2'h0}}; // @[Misc.scala:35:36, :49:20] wire s2_valid_hit_maybe_flush_pre_data_ecc_and_waw = s2_valid_masked & s2_hit; // @[Misc.scala:35:9] wire s2_valid_hit_pre_data_ecc_and_waw = s2_valid_hit_maybe_flush_pre_data_ecc_and_waw & s2_readwrite; // @[DCache.scala:354:30, :397:89, :418:89] wire s2_valid_flush_line = s2_valid_hit_maybe_flush_pre_data_ecc_and_waw & s2_req_cmd == 5'h5 & s2_req_size[0]; // @[DCache.scala:339:19, :340:{37,68}, :341:54, :397:89, :419:75] wire s2_valid_miss = s2_valid_masked & s2_readwrite & ~s2_hit; // @[Misc.scala:35:9] wire s2_valid_cached_miss = s2_valid_miss & s2_pma_cacheable & ~uncachedInFlight_0; // @[DCache.scala:236:33, :343:19, :423:{39,73}, :425:{44,60,63}] wire s2_want_victimize = s2_valid_cached_miss | s2_valid_flush_line | s2_flush_valid_pre_tag_ecc; // @[DCache.scala:341:54, :355:43, :419:75, :425:60, :427:{77,123}] wire s2_valid_uncached_pending = s2_valid_miss & ~s2_pma_cacheable & ~uncachedInFlight_0; // @[DCache.scala:236:33, :343:19, :423:{39,73}, :424:21, :430:{49,64,67}] reg [2:0] s2_victim_way_r; // @[DCache.scala:431:41] assign s2_victim_or_hit_way = (|s2_hit_state_state) ? s2_hit_way : 8'h1 << s2_victim_way_r; // @[OneHot.scala:58:35] wire _s2_victim_state_T = s2_victim_way_r == 3'h0; // @[Mux.scala:32:36] wire _s2_victim_state_T_1 = s2_victim_way_r == 3'h1; // @[Mux.scala:32:36] wire _s2_victim_state_T_2 = s2_victim_way_r == 3'h2; // @[Mux.scala:32:36] wire _s2_victim_state_T_3 = s2_victim_way_r == 3'h3; // @[Mux.scala:32:36] wire _s2_victim_state_T_4 = s2_victim_way_r == 3'h4; // @[Mux.scala:32:36] wire _s2_victim_state_T_5 = s2_victim_way_r == 3'h5; // @[Mux.scala:32:36] wire _s2_victim_state_T_6 = s2_victim_way_r == 3'h6; // @[Mux.scala:32:36] wire [1:0] s2_victim_state_state = (|s2_hit_state_state) ? s2_hit_state_state : (_s2_victim_state_T ? s2_meta_corrected_r[21:20] : 2'h0) | (_s2_victim_state_T_1 ? 
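// s2_victim_state: on a hit, reuse the hit way's coherence state; otherwise take the corrected metadata of the way selected by s2_victim_way_r.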
s2_meta_corrected_r_1[21:20] : 2'h0) | (_s2_victim_state_T_2 ? s2_meta_corrected_r_2[21:20] : 2'h0) | (_s2_victim_state_T_3 ? s2_meta_corrected_r_3[21:20] : 2'h0) | (_s2_victim_state_T_4 ? s2_meta_corrected_r_4[21:20] : 2'h0) | (_s2_victim_state_T_5 ? s2_meta_corrected_r_5[21:20] : 2'h0) | (_s2_victim_state_T_6 ? s2_meta_corrected_r_6[21:20] : 2'h0) | ((&s2_victim_way_r) ? s2_meta_corrected_r_7[21:20] : 2'h0); // @[Mux.scala:30:73, :32:36] wire [3:0] _r_T_59 = {probe_bits_param, s2_probe_state_state}; // @[Metadata.scala:120:19] wire _r_T_85 = _r_T_59 == 4'hB; // @[Misc.scala:56:20] wire _r_T_88 = _r_T_59 == 4'h4; // @[Misc.scala:56:20] wire _r_T_92 = _r_T_59 == 4'h5; // @[Misc.scala:56:20] wire _r_T_96 = _r_T_59 == 4'h6; // @[Misc.scala:56:20] wire _r_T_100 = _r_T_59 == 4'h7; // @[Misc.scala:56:20] wire _r_T_104 = _r_T_59 == 4'h0; // @[Misc.scala:56:20] wire _r_T_108 = _r_T_59 == 4'h1; // @[Misc.scala:56:20] wire _r_T_112 = _r_T_59 == 4'h2; // @[Misc.scala:56:20] wire _r_T_116 = _r_T_59 == 4'h3; // @[Misc.scala:56:20] wire s2_prb_ack_data = _r_T_116 | ~(_r_T_112 | _r_T_108 | _r_T_104) & (_r_T_100 | ~(_r_T_96 | _r_T_92 | _r_T_88) & _r_T_85); // @[Misc.scala:38:9, :56:20] wire _GEN_21 = _r_T_116 | _r_T_112; // @[Misc.scala:38:36, :56:20] wire s2_victim_dirty = &s2_victim_state_state; // @[Misc.scala:38:9, :56:20] wire io_cpu_s2_nack_0 = s2_valid_no_xcpt & ~(s2_valid_uncached_pending & auto_out_a_ready) & ~(s2_valid_masked & s2_req_cmd == 5'h17) & ~s2_valid_hit_pre_data_ecc_and_waw; // @[DCache.scala:332:35, :337:42, :339:19, :418:89, :430:{49,64}, :440:57, :441:61, :444:17, :445:{38,41,64,67,86,89}] assign metaArb_io_in_2_valid = s2_valid_hit_pre_data_ecc_and_waw & s2_hit_state_state != _GEN_20[_r_T]; // @[Misc.scala:35:36, :49:20] wire _GEN_22 = io_cpu_s2_nack_0 | metaArb_io_in_2_valid; // @[DCache.scala:445:{38,64,86}, :446:{24,62}] assign metaArb_io_in_4_bits_idx = probe_bits_address[11:6]; // @[DCache.scala:184:29, :1200:47] assign metaArb_io_in_3_bits_idx = s2_req_addr[11:6]; // @[DCache.scala:339:19, :453:76] assign metaArb_io_in_2_bits_data = {_GEN_20[_r_T], s2_req_addr[31:12]}; // @[Misc.scala:35:36, :49:20] reg [6:0] lrscCount; // @[DCache.scala:472:26] reg [33:0] lrscAddr; // @[DCache.scala:475:21] wire s2_sc_fail = s2_sc & ~((|(lrscCount[6:2])) & lrscAddr == s2_req_addr[39:6]); // @[package.scala:16:47] reg [4:0] pstore1_cmd; // @[DCache.scala:492:30] reg [39:0] pstore1_addr; // @[DCache.scala:493:31] reg [63:0] pstore1_data; // @[DCache.scala:494:31] reg [7:0] pstore1_way; // @[DCache.scala:495:30] reg [7:0] pstore1_mask; // @[DCache.scala:496:31] reg pstore1_rmw; // @[DCache.scala:498:44] wire _pstore1_held_T = s2_valid_hit_pre_data_ecc_and_waw & s2_write; // @[package.scala:81:59] reg pstore2_valid; // @[DCache.scala:501:30] wire _pstore_drain_opportunistic_res_T_2 = _pstore_drain_opportunistic_T_25 | _pstore_drain_opportunistic_res_T_1; // @[package.scala:16:47, :81:59] reg pstore_drain_on_miss_REG; // @[DCache.scala:503:56] reg pstore1_held; // @[DCache.scala:504:29] wire pstore1_valid_likely = s2_valid & s2_write | pstore1_held; // @[package.scala:81:59] wire pstore1_valid = _pstore1_held_T & ~s2_sc_fail | pstore1_held; // @[DCache.scala:477:26, :490:{46,58,61}, :504:29, :507:38] wire pstore_drain_structural = pstore1_valid_likely & pstore2_valid & (s1_valid & s1_write | pstore1_rmw); // @[package.scala:81:59] wire _dataArb_io_in_0_valid_T_4 = s2_valid_hit_pre_data_ecc_and_waw & s2_write; // @[package.scala:81:59] wire _dataArb_io_in_0_valid_T_9 = ~(io_cpu_req_valid & 
~_pstore_drain_opportunistic_res_T_2) | (|_GEN_19) | pstore_drain_on_miss_REG; // @[package.scala:81:59] assign pstore_drain = pstore_drain_structural | ((_dataArb_io_in_0_valid_T_4 | pstore1_held) & ~pstore1_rmw | pstore2_valid) & _dataArb_io_in_0_valid_T_9; // @[DCache.scala:498:44, :501:30, :503:46, :504:29, :506:{72,96}, :509:{54,71}, :517:44, :518:{41,44,58,76,107}] reg [39:0] pstore2_addr; // @[DCache.scala:524:31] reg [7:0] pstore2_way; // @[DCache.scala:525:30] reg [7:0] pstore2_storegen_data_r; // @[DCache.scala:528:22] reg [7:0] pstore2_storegen_data_r_1; // @[DCache.scala:528:22] reg [7:0] pstore2_storegen_data_r_2; // @[DCache.scala:528:22] reg [7:0] pstore2_storegen_data_r_3; // @[DCache.scala:528:22] reg [7:0] pstore2_storegen_data_r_4; // @[DCache.scala:528:22] reg [7:0] pstore2_storegen_data_r_5; // @[DCache.scala:528:22] reg [7:0] pstore2_storegen_data_r_6; // @[DCache.scala:528:22] reg [7:0] pstore2_storegen_data_r_7; // @[DCache.scala:528:22] reg [7:0] pstore2_storegen_mask; // @[DCache.scala:531:19] assign dataArb_io_in_0_valid = pstore_drain_structural | ((_dataArb_io_in_0_valid_T_4 | pstore1_held) & ~pstore1_rmw | pstore2_valid) & _dataArb_io_in_0_valid_T_9; // @[DCache.scala:498:44, :501:30, :503:46, :504:29, :506:{72,96}, :509:{54,71}, :517:44, :518:{41,44,58,76,107}] assign _dataArb_io_in_0_bits_wordMask_wordMask_T = pstore2_valid ? pstore2_addr[11:0] : pstore1_addr[11:0]; // @[DCache.scala:493:31, :501:30, :524:31, :549:36] assign dataArb_io_in_0_bits_way_en = pstore2_valid ? pstore2_way : pstore1_way; // @[DCache.scala:495:30, :501:30, :525:30, :550:38] assign dataArb_io_in_0_bits_wdata = pstore2_valid ? {pstore2_storegen_data_r_7, pstore2_storegen_data_r_6, pstore2_storegen_data_r_5, pstore2_storegen_data_r_4, pstore2_storegen_data_r_3, pstore2_storegen_data_r_2, pstore2_storegen_data_r_1, pstore2_storegen_data_r} : pstore1_data; // @[package.scala:45:27] assign dataArb_io_in_0_bits_eccMask = pstore2_valid ? pstore2_storegen_mask : pstore1_mask; // @[DCache.scala:496:31, :501:30, :531:19, :557:47] wire _GEN_23 = s1_valid & s1_read & (pstore1_valid_likely & pstore1_addr[11:3] == s1_vaddr[11:3] & (s1_write ? (|(pstore1_mask & s1_mask_xwr)) : (|(pstore1_mask & s1_mask_xwr))) | pstore2_valid & pstore2_addr[11:3] == s1_vaddr[11:3] & (s1_write ? 
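// _GEN_23: detects an s1 load that overlaps a pending store (pstore1 or pstore2) to the same aligned doubleword, comparing byte masks.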
(|(pstore2_storegen_mask & s1_mask_xwr)) : (|(pstore2_storegen_mask & s1_mask_xwr)))); // @[package.scala:81:59] wire get_a_mask_sub_sub_size = s2_req_size == 2'h2; // @[Misc.scala:209:26] wire get_a_mask_sub_sub_0_1 = (&s2_req_size) | get_a_mask_sub_sub_size & ~(s2_req_addr[2]); // @[Misc.scala:206:21, :209:26, :210:26, :211:20, :215:{29,38}] wire get_a_mask_sub_sub_1_1 = (&s2_req_size) | get_a_mask_sub_sub_size & s2_req_addr[2]; // @[Misc.scala:206:21, :209:26, :210:26, :215:{29,38}] wire get_a_mask_sub_size = s2_req_size == 2'h1; // @[Misc.scala:209:26] wire get_a_mask_sub_0_2 = ~(s2_req_addr[2]) & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27] wire get_a_mask_sub_0_1 = get_a_mask_sub_sub_0_1 | get_a_mask_sub_size & get_a_mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire get_a_mask_sub_1_2 = ~(s2_req_addr[2]) & s2_req_addr[1]; // @[Misc.scala:210:26, :211:20, :214:27] wire get_a_mask_sub_1_1 = get_a_mask_sub_sub_0_1 | get_a_mask_sub_size & get_a_mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire get_a_mask_sub_2_2 = s2_req_addr[2] & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27] wire get_a_mask_sub_2_1 = get_a_mask_sub_sub_1_1 | get_a_mask_sub_size & get_a_mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire get_a_mask_sub_3_2 = s2_req_addr[2] & s2_req_addr[1]; // @[Misc.scala:210:26, :214:27] wire get_a_mask_sub_3_1 = get_a_mask_sub_sub_1_1 | get_a_mask_sub_size & get_a_mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire put_a_mask_sub_sub_size = s2_req_size == 2'h2; // @[Misc.scala:209:26] wire put_a_mask_sub_sub_0_1 = (&s2_req_size) | put_a_mask_sub_sub_size & ~(s2_req_addr[2]); // @[Misc.scala:206:21, :209:26, :210:26, :211:20, :215:{29,38}] wire put_a_mask_sub_sub_1_1 = (&s2_req_size) | put_a_mask_sub_sub_size & s2_req_addr[2]; // @[Misc.scala:206:21, :209:26, :210:26, :215:{29,38}] wire put_a_mask_sub_size = s2_req_size == 2'h1; // @[Misc.scala:209:26] wire put_a_mask_sub_0_2 = ~(s2_req_addr[2]) & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27] wire put_a_mask_sub_0_1 = put_a_mask_sub_sub_0_1 | put_a_mask_sub_size & put_a_mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire put_a_mask_sub_1_2 = ~(s2_req_addr[2]) & s2_req_addr[1]; // @[Misc.scala:210:26, :211:20, :214:27] wire put_a_mask_sub_1_1 = put_a_mask_sub_sub_0_1 | put_a_mask_sub_size & put_a_mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire put_a_mask_sub_2_2 = s2_req_addr[2] & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27] wire put_a_mask_sub_2_1 = put_a_mask_sub_sub_1_1 | put_a_mask_sub_size & put_a_mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire put_a_mask_sub_3_2 = s2_req_addr[2] & s2_req_addr[1]; // @[Misc.scala:210:26, :214:27] wire put_a_mask_sub_3_1 = put_a_mask_sub_sub_1_1 | put_a_mask_sub_size & put_a_mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_sub_size = s2_req_size == 2'h2; // @[Misc.scala:209:26] wire atomics_a_mask_sub_sub_0_1 = (&s2_req_size) | atomics_a_mask_sub_sub_size & ~(s2_req_addr[2]); // @[Misc.scala:206:21, :209:26, :210:26, :211:20, :215:{29,38}] wire atomics_a_mask_sub_sub_1_1 = (&s2_req_size) | atomics_a_mask_sub_sub_size & s2_req_addr[2]; // @[Misc.scala:206:21, :209:26, :210:26, :215:{29,38}] wire atomics_a_mask_sub_size = s2_req_size == 2'h1; // @[Misc.scala:209:26] wire atomics_a_mask_sub_0_2 = ~(s2_req_addr[2]) & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27] wire 
atomics_a_mask_sub_0_1 = atomics_a_mask_sub_sub_0_1 | atomics_a_mask_sub_size & atomics_a_mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_1_2 = ~(s2_req_addr[2]) & s2_req_addr[1]; // @[Misc.scala:210:26, :211:20, :214:27] wire atomics_a_mask_sub_1_1 = atomics_a_mask_sub_sub_0_1 | atomics_a_mask_sub_size & atomics_a_mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_2_2 = s2_req_addr[2] & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27] wire atomics_a_mask_sub_2_1 = atomics_a_mask_sub_sub_1_1 | atomics_a_mask_sub_size & atomics_a_mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_3_2 = s2_req_addr[2] & s2_req_addr[1]; // @[Misc.scala:210:26, :214:27] wire atomics_a_mask_sub_3_1 = atomics_a_mask_sub_sub_1_1 | atomics_a_mask_sub_size & atomics_a_mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_sub_size_1 = s2_req_size == 2'h2; // @[Misc.scala:209:26] wire atomics_a_mask_sub_sub_0_1_1 = (&s2_req_size) | atomics_a_mask_sub_sub_size_1 & ~(s2_req_addr[2]); // @[Misc.scala:206:21, :209:26, :210:26, :211:20, :215:{29,38}] wire atomics_a_mask_sub_sub_1_1_1 = (&s2_req_size) | atomics_a_mask_sub_sub_size_1 & s2_req_addr[2]; // @[Misc.scala:206:21, :209:26, :210:26, :215:{29,38}] wire atomics_a_mask_sub_size_1 = s2_req_size == 2'h1; // @[Misc.scala:209:26] wire atomics_a_mask_sub_0_2_1 = ~(s2_req_addr[2]) & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27] wire atomics_a_mask_sub_0_1_1 = atomics_a_mask_sub_sub_0_1_1 | atomics_a_mask_sub_size_1 & atomics_a_mask_sub_0_2_1; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_1_2_1 = ~(s2_req_addr[2]) & s2_req_addr[1]; // @[Misc.scala:210:26, :211:20, :214:27] wire atomics_a_mask_sub_1_1_1 = atomics_a_mask_sub_sub_0_1_1 | atomics_a_mask_sub_size_1 & atomics_a_mask_sub_1_2_1; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_2_2_1 = s2_req_addr[2] & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27] wire atomics_a_mask_sub_2_1_1 = atomics_a_mask_sub_sub_1_1_1 | atomics_a_mask_sub_size_1 & atomics_a_mask_sub_2_2_1; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_3_2_1 = s2_req_addr[2] & s2_req_addr[1]; // @[Misc.scala:210:26, :214:27] wire atomics_a_mask_sub_3_1_1 = atomics_a_mask_sub_sub_1_1_1 | atomics_a_mask_sub_size_1 & atomics_a_mask_sub_3_2_1; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_sub_size_2 = s2_req_size == 2'h2; // @[Misc.scala:209:26] wire atomics_a_mask_sub_sub_0_1_2 = (&s2_req_size) | atomics_a_mask_sub_sub_size_2 & ~(s2_req_addr[2]); // @[Misc.scala:206:21, :209:26, :210:26, :211:20, :215:{29,38}] wire atomics_a_mask_sub_sub_1_1_2 = (&s2_req_size) | atomics_a_mask_sub_sub_size_2 & s2_req_addr[2]; // @[Misc.scala:206:21, :209:26, :210:26, :215:{29,38}] wire atomics_a_mask_sub_size_2 = s2_req_size == 2'h1; // @[Misc.scala:209:26] wire atomics_a_mask_sub_0_2_2 = ~(s2_req_addr[2]) & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27] wire atomics_a_mask_sub_0_1_2 = atomics_a_mask_sub_sub_0_1_2 | atomics_a_mask_sub_size_2 & atomics_a_mask_sub_0_2_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_1_2_2 = ~(s2_req_addr[2]) & s2_req_addr[1]; // @[Misc.scala:210:26, :211:20, :214:27] wire atomics_a_mask_sub_1_1_2 = atomics_a_mask_sub_sub_0_1_2 | atomics_a_mask_sub_size_2 & atomics_a_mask_sub_1_2_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire 
atomics_a_mask_sub_2_2_2 = s2_req_addr[2] & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27] wire atomics_a_mask_sub_2_1_2 = atomics_a_mask_sub_sub_1_1_2 | atomics_a_mask_sub_size_2 & atomics_a_mask_sub_2_2_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_3_2_2 = s2_req_addr[2] & s2_req_addr[1]; // @[Misc.scala:210:26, :214:27] wire atomics_a_mask_sub_3_1_2 = atomics_a_mask_sub_sub_1_1_2 | atomics_a_mask_sub_size_2 & atomics_a_mask_sub_3_2_2; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_sub_size_3 = s2_req_size == 2'h2; // @[Misc.scala:209:26] wire atomics_a_mask_sub_sub_0_1_3 = (&s2_req_size) | atomics_a_mask_sub_sub_size_3 & ~(s2_req_addr[2]); // @[Misc.scala:206:21, :209:26, :210:26, :211:20, :215:{29,38}] wire atomics_a_mask_sub_sub_1_1_3 = (&s2_req_size) | atomics_a_mask_sub_sub_size_3 & s2_req_addr[2]; // @[Misc.scala:206:21, :209:26, :210:26, :215:{29,38}] wire atomics_a_mask_sub_size_3 = s2_req_size == 2'h1; // @[Misc.scala:209:26] wire atomics_a_mask_sub_0_2_3 = ~(s2_req_addr[2]) & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27] wire atomics_a_mask_sub_0_1_3 = atomics_a_mask_sub_sub_0_1_3 | atomics_a_mask_sub_size_3 & atomics_a_mask_sub_0_2_3; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_1_2_3 = ~(s2_req_addr[2]) & s2_req_addr[1]; // @[Misc.scala:210:26, :211:20, :214:27] wire atomics_a_mask_sub_1_1_3 = atomics_a_mask_sub_sub_0_1_3 | atomics_a_mask_sub_size_3 & atomics_a_mask_sub_1_2_3; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_2_2_3 = s2_req_addr[2] & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27] wire atomics_a_mask_sub_2_1_3 = atomics_a_mask_sub_sub_1_1_3 | atomics_a_mask_sub_size_3 & atomics_a_mask_sub_2_2_3; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_3_2_3 = s2_req_addr[2] & s2_req_addr[1]; // @[Misc.scala:210:26, :214:27] wire atomics_a_mask_sub_3_1_3 = atomics_a_mask_sub_sub_1_1_3 | atomics_a_mask_sub_size_3 & atomics_a_mask_sub_3_2_3; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_sub_size_4 = s2_req_size == 2'h2; // @[Misc.scala:209:26] wire atomics_a_mask_sub_sub_0_1_4 = (&s2_req_size) | atomics_a_mask_sub_sub_size_4 & ~(s2_req_addr[2]); // @[Misc.scala:206:21, :209:26, :210:26, :211:20, :215:{29,38}] wire atomics_a_mask_sub_sub_1_1_4 = (&s2_req_size) | atomics_a_mask_sub_sub_size_4 & s2_req_addr[2]; // @[Misc.scala:206:21, :209:26, :210:26, :215:{29,38}] wire atomics_a_mask_sub_size_4 = s2_req_size == 2'h1; // @[Misc.scala:209:26] wire atomics_a_mask_sub_0_2_4 = ~(s2_req_addr[2]) & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27] wire atomics_a_mask_sub_0_1_4 = atomics_a_mask_sub_sub_0_1_4 | atomics_a_mask_sub_size_4 & atomics_a_mask_sub_0_2_4; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_1_2_4 = ~(s2_req_addr[2]) & s2_req_addr[1]; // @[Misc.scala:210:26, :211:20, :214:27] wire atomics_a_mask_sub_1_1_4 = atomics_a_mask_sub_sub_0_1_4 | atomics_a_mask_sub_size_4 & atomics_a_mask_sub_1_2_4; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_2_2_4 = s2_req_addr[2] & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27] wire atomics_a_mask_sub_2_1_4 = atomics_a_mask_sub_sub_1_1_4 | atomics_a_mask_sub_size_4 & atomics_a_mask_sub_2_2_4; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_3_2_4 = s2_req_addr[2] & s2_req_addr[1]; // @[Misc.scala:210:26, :214:27] wire atomics_a_mask_sub_3_1_4 = 
atomics_a_mask_sub_sub_1_1_4 | atomics_a_mask_sub_size_4 & atomics_a_mask_sub_3_2_4; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_sub_size_5 = s2_req_size == 2'h2; // @[Misc.scala:209:26] wire atomics_a_mask_sub_sub_0_1_5 = (&s2_req_size) | atomics_a_mask_sub_sub_size_5 & ~(s2_req_addr[2]); // @[Misc.scala:206:21, :209:26, :210:26, :211:20, :215:{29,38}] wire atomics_a_mask_sub_sub_1_1_5 = (&s2_req_size) | atomics_a_mask_sub_sub_size_5 & s2_req_addr[2]; // @[Misc.scala:206:21, :209:26, :210:26, :215:{29,38}] wire atomics_a_mask_sub_size_5 = s2_req_size == 2'h1; // @[Misc.scala:209:26] wire atomics_a_mask_sub_0_2_5 = ~(s2_req_addr[2]) & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27] wire atomics_a_mask_sub_0_1_5 = atomics_a_mask_sub_sub_0_1_5 | atomics_a_mask_sub_size_5 & atomics_a_mask_sub_0_2_5; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_1_2_5 = ~(s2_req_addr[2]) & s2_req_addr[1]; // @[Misc.scala:210:26, :211:20, :214:27] wire atomics_a_mask_sub_1_1_5 = atomics_a_mask_sub_sub_0_1_5 | atomics_a_mask_sub_size_5 & atomics_a_mask_sub_1_2_5; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_2_2_5 = s2_req_addr[2] & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27] wire atomics_a_mask_sub_2_1_5 = atomics_a_mask_sub_sub_1_1_5 | atomics_a_mask_sub_size_5 & atomics_a_mask_sub_2_2_5; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_3_2_5 = s2_req_addr[2] & s2_req_addr[1]; // @[Misc.scala:210:26, :214:27] wire atomics_a_mask_sub_3_1_5 = atomics_a_mask_sub_sub_1_1_5 | atomics_a_mask_sub_size_5 & atomics_a_mask_sub_3_2_5; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_sub_size_6 = s2_req_size == 2'h2; // @[Misc.scala:209:26] wire atomics_a_mask_sub_sub_0_1_6 = (&s2_req_size) | atomics_a_mask_sub_sub_size_6 & ~(s2_req_addr[2]); // @[Misc.scala:206:21, :209:26, :210:26, :211:20, :215:{29,38}] wire atomics_a_mask_sub_sub_1_1_6 = (&s2_req_size) | atomics_a_mask_sub_sub_size_6 & s2_req_addr[2]; // @[Misc.scala:206:21, :209:26, :210:26, :215:{29,38}] wire atomics_a_mask_sub_size_6 = s2_req_size == 2'h1; // @[Misc.scala:209:26] wire atomics_a_mask_sub_0_2_6 = ~(s2_req_addr[2]) & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27] wire atomics_a_mask_sub_0_1_6 = atomics_a_mask_sub_sub_0_1_6 | atomics_a_mask_sub_size_6 & atomics_a_mask_sub_0_2_6; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_1_2_6 = ~(s2_req_addr[2]) & s2_req_addr[1]; // @[Misc.scala:210:26, :211:20, :214:27] wire atomics_a_mask_sub_1_1_6 = atomics_a_mask_sub_sub_0_1_6 | atomics_a_mask_sub_size_6 & atomics_a_mask_sub_1_2_6; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_2_2_6 = s2_req_addr[2] & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27] wire atomics_a_mask_sub_2_1_6 = atomics_a_mask_sub_sub_1_1_6 | atomics_a_mask_sub_size_6 & atomics_a_mask_sub_2_2_6; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_3_2_6 = s2_req_addr[2] & s2_req_addr[1]; // @[Misc.scala:210:26, :214:27] wire atomics_a_mask_sub_3_1_6 = atomics_a_mask_sub_sub_1_1_6 | atomics_a_mask_sub_size_6 & atomics_a_mask_sub_3_2_6; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_sub_size_7 = s2_req_size == 2'h2; // @[Misc.scala:209:26] wire atomics_a_mask_sub_sub_0_1_7 = (&s2_req_size) | atomics_a_mask_sub_sub_size_7 & ~(s2_req_addr[2]); // @[Misc.scala:206:21, :209:26, :210:26, :211:20, :215:{29,38}] wire 
atomics_a_mask_sub_sub_1_1_7 = (&s2_req_size) | atomics_a_mask_sub_sub_size_7 & s2_req_addr[2]; // @[Misc.scala:206:21, :209:26, :210:26, :215:{29,38}] wire atomics_a_mask_sub_size_7 = s2_req_size == 2'h1; // @[Misc.scala:209:26] wire atomics_a_mask_sub_0_2_7 = ~(s2_req_addr[2]) & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27] wire atomics_a_mask_sub_0_1_7 = atomics_a_mask_sub_sub_0_1_7 | atomics_a_mask_sub_size_7 & atomics_a_mask_sub_0_2_7; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_1_2_7 = ~(s2_req_addr[2]) & s2_req_addr[1]; // @[Misc.scala:210:26, :211:20, :214:27] wire atomics_a_mask_sub_1_1_7 = atomics_a_mask_sub_sub_0_1_7 | atomics_a_mask_sub_size_7 & atomics_a_mask_sub_1_2_7; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_2_2_7 = s2_req_addr[2] & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27] wire atomics_a_mask_sub_2_1_7 = atomics_a_mask_sub_sub_1_1_7 | atomics_a_mask_sub_size_7 & atomics_a_mask_sub_2_2_7; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_3_2_7 = s2_req_addr[2] & s2_req_addr[1]; // @[Misc.scala:210:26, :214:27] wire atomics_a_mask_sub_3_1_7 = atomics_a_mask_sub_sub_1_1_7 | atomics_a_mask_sub_size_7 & atomics_a_mask_sub_3_2_7; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_sub_size_8 = s2_req_size == 2'h2; // @[Misc.scala:209:26] wire atomics_a_mask_sub_sub_0_1_8 = (&s2_req_size) | atomics_a_mask_sub_sub_size_8 & ~(s2_req_addr[2]); // @[Misc.scala:206:21, :209:26, :210:26, :211:20, :215:{29,38}] wire atomics_a_mask_sub_sub_1_1_8 = (&s2_req_size) | atomics_a_mask_sub_sub_size_8 & s2_req_addr[2]; // @[Misc.scala:206:21, :209:26, :210:26, :215:{29,38}] wire atomics_a_mask_sub_size_8 = s2_req_size == 2'h1; // @[Misc.scala:209:26] wire atomics_a_mask_sub_0_2_8 = ~(s2_req_addr[2]) & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27] wire atomics_a_mask_sub_0_1_8 = atomics_a_mask_sub_sub_0_1_8 | atomics_a_mask_sub_size_8 & atomics_a_mask_sub_0_2_8; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_1_2_8 = ~(s2_req_addr[2]) & s2_req_addr[1]; // @[Misc.scala:210:26, :211:20, :214:27] wire atomics_a_mask_sub_1_1_8 = atomics_a_mask_sub_sub_0_1_8 | atomics_a_mask_sub_size_8 & atomics_a_mask_sub_1_2_8; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_2_2_8 = s2_req_addr[2] & ~(s2_req_addr[1]); // @[Misc.scala:210:26, :211:20, :214:27] wire atomics_a_mask_sub_2_1_8 = atomics_a_mask_sub_sub_1_1_8 | atomics_a_mask_sub_size_8 & atomics_a_mask_sub_2_2_8; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire atomics_a_mask_sub_3_2_8 = s2_req_addr[2] & s2_req_addr[1]; // @[Misc.scala:210:26, :214:27] wire atomics_a_mask_sub_3_1_8 = atomics_a_mask_sub_sub_1_1_8 | atomics_a_mask_sub_size_8 & atomics_a_mask_sub_3_2_8; // @[Misc.scala:209:26, :214:27, :215:{29,38}] wire [7:0] atomics_mask = _io_cpu_store_pending_T_16 ? {atomics_a_mask_sub_3_1_8 | atomics_a_mask_sub_3_2_8 & s2_req_addr[0], atomics_a_mask_sub_3_1_8 | atomics_a_mask_sub_3_2_8 & ~(s2_req_addr[0]), atomics_a_mask_sub_2_1_8 | atomics_a_mask_sub_2_2_8 & s2_req_addr[0], atomics_a_mask_sub_2_1_8 | atomics_a_mask_sub_2_2_8 & ~(s2_req_addr[0]), atomics_a_mask_sub_1_1_8 | atomics_a_mask_sub_1_2_8 & s2_req_addr[0], atomics_a_mask_sub_1_1_8 | atomics_a_mask_sub_1_2_8 & ~(s2_req_addr[0]), atomics_a_mask_sub_0_1_8 | atomics_a_mask_sub_0_2_8 & s2_req_addr[0], atomics_a_mask_sub_0_1_8 | atomics_a_mask_sub_0_2_8 & ~(s2_req_addr[0])} : _io_cpu_store_pending_T_15 ? 
{atomics_a_mask_sub_3_1_7 | atomics_a_mask_sub_3_2_7 & s2_req_addr[0], atomics_a_mask_sub_3_1_7 | atomics_a_mask_sub_3_2_7 & ~(s2_req_addr[0]), atomics_a_mask_sub_2_1_7 | atomics_a_mask_sub_2_2_7 & s2_req_addr[0], atomics_a_mask_sub_2_1_7 | atomics_a_mask_sub_2_2_7 & ~(s2_req_addr[0]), atomics_a_mask_sub_1_1_7 | atomics_a_mask_sub_1_2_7 & s2_req_addr[0], atomics_a_mask_sub_1_1_7 | atomics_a_mask_sub_1_2_7 & ~(s2_req_addr[0]), atomics_a_mask_sub_0_1_7 | atomics_a_mask_sub_0_2_7 & s2_req_addr[0], atomics_a_mask_sub_0_1_7 | atomics_a_mask_sub_0_2_7 & ~(s2_req_addr[0])} : _io_cpu_store_pending_T_14 ? {atomics_a_mask_sub_3_1_6 | atomics_a_mask_sub_3_2_6 & s2_req_addr[0], atomics_a_mask_sub_3_1_6 | atomics_a_mask_sub_3_2_6 & ~(s2_req_addr[0]), atomics_a_mask_sub_2_1_6 | atomics_a_mask_sub_2_2_6 & s2_req_addr[0], atomics_a_mask_sub_2_1_6 | atomics_a_mask_sub_2_2_6 & ~(s2_req_addr[0]), atomics_a_mask_sub_1_1_6 | atomics_a_mask_sub_1_2_6 & s2_req_addr[0], atomics_a_mask_sub_1_1_6 | atomics_a_mask_sub_1_2_6 & ~(s2_req_addr[0]), atomics_a_mask_sub_0_1_6 | atomics_a_mask_sub_0_2_6 & s2_req_addr[0], atomics_a_mask_sub_0_1_6 | atomics_a_mask_sub_0_2_6 & ~(s2_req_addr[0])} : _io_cpu_store_pending_T_13 ? {atomics_a_mask_sub_3_1_5 | atomics_a_mask_sub_3_2_5 & s2_req_addr[0], atomics_a_mask_sub_3_1_5 | atomics_a_mask_sub_3_2_5 & ~(s2_req_addr[0]), atomics_a_mask_sub_2_1_5 | atomics_a_mask_sub_2_2_5 & s2_req_addr[0], atomics_a_mask_sub_2_1_5 | atomics_a_mask_sub_2_2_5 & ~(s2_req_addr[0]), atomics_a_mask_sub_1_1_5 | atomics_a_mask_sub_1_2_5 & s2_req_addr[0], atomics_a_mask_sub_1_1_5 | atomics_a_mask_sub_1_2_5 & ~(s2_req_addr[0]), atomics_a_mask_sub_0_1_5 | atomics_a_mask_sub_0_2_5 & s2_req_addr[0], atomics_a_mask_sub_0_1_5 | atomics_a_mask_sub_0_2_5 & ~(s2_req_addr[0])} : _io_cpu_store_pending_T_12 ? {atomics_a_mask_sub_3_1_4 | atomics_a_mask_sub_3_2_4 & s2_req_addr[0], atomics_a_mask_sub_3_1_4 | atomics_a_mask_sub_3_2_4 & ~(s2_req_addr[0]), atomics_a_mask_sub_2_1_4 | atomics_a_mask_sub_2_2_4 & s2_req_addr[0], atomics_a_mask_sub_2_1_4 | atomics_a_mask_sub_2_2_4 & ~(s2_req_addr[0]), atomics_a_mask_sub_1_1_4 | atomics_a_mask_sub_1_2_4 & s2_req_addr[0], atomics_a_mask_sub_1_1_4 | atomics_a_mask_sub_1_2_4 & ~(s2_req_addr[0]), atomics_a_mask_sub_0_1_4 | atomics_a_mask_sub_0_2_4 & s2_req_addr[0], atomics_a_mask_sub_0_1_4 | atomics_a_mask_sub_0_2_4 & ~(s2_req_addr[0])} : _io_cpu_store_pending_T_8 ? {atomics_a_mask_sub_3_1_3 | atomics_a_mask_sub_3_2_3 & s2_req_addr[0], atomics_a_mask_sub_3_1_3 | atomics_a_mask_sub_3_2_3 & ~(s2_req_addr[0]), atomics_a_mask_sub_2_1_3 | atomics_a_mask_sub_2_2_3 & s2_req_addr[0], atomics_a_mask_sub_2_1_3 | atomics_a_mask_sub_2_2_3 & ~(s2_req_addr[0]), atomics_a_mask_sub_1_1_3 | atomics_a_mask_sub_1_2_3 & s2_req_addr[0], atomics_a_mask_sub_1_1_3 | atomics_a_mask_sub_1_2_3 & ~(s2_req_addr[0]), atomics_a_mask_sub_0_1_3 | atomics_a_mask_sub_0_2_3 & s2_req_addr[0], atomics_a_mask_sub_0_1_3 | atomics_a_mask_sub_0_2_3 & ~(s2_req_addr[0])} : _io_cpu_store_pending_T_7 ? 
{atomics_a_mask_sub_3_1_2 | atomics_a_mask_sub_3_2_2 & s2_req_addr[0], atomics_a_mask_sub_3_1_2 | atomics_a_mask_sub_3_2_2 & ~(s2_req_addr[0]), atomics_a_mask_sub_2_1_2 | atomics_a_mask_sub_2_2_2 & s2_req_addr[0], atomics_a_mask_sub_2_1_2 | atomics_a_mask_sub_2_2_2 & ~(s2_req_addr[0]), atomics_a_mask_sub_1_1_2 | atomics_a_mask_sub_1_2_2 & s2_req_addr[0], atomics_a_mask_sub_1_1_2 | atomics_a_mask_sub_1_2_2 & ~(s2_req_addr[0]), atomics_a_mask_sub_0_1_2 | atomics_a_mask_sub_0_2_2 & s2_req_addr[0], atomics_a_mask_sub_0_1_2 | atomics_a_mask_sub_0_2_2 & ~(s2_req_addr[0])} : _io_cpu_store_pending_T_6 ? {atomics_a_mask_sub_3_1_1 | atomics_a_mask_sub_3_2_1 & s2_req_addr[0], atomics_a_mask_sub_3_1_1 | atomics_a_mask_sub_3_2_1 & ~(s2_req_addr[0]), atomics_a_mask_sub_2_1_1 | atomics_a_mask_sub_2_2_1 & s2_req_addr[0], atomics_a_mask_sub_2_1_1 | atomics_a_mask_sub_2_2_1 & ~(s2_req_addr[0]), atomics_a_mask_sub_1_1_1 | atomics_a_mask_sub_1_2_1 & s2_req_addr[0], atomics_a_mask_sub_1_1_1 | atomics_a_mask_sub_1_2_1 & ~(s2_req_addr[0]), atomics_a_mask_sub_0_1_1 | atomics_a_mask_sub_0_2_1 & s2_req_addr[0], atomics_a_mask_sub_0_1_1 | atomics_a_mask_sub_0_2_1 & ~(s2_req_addr[0])} : _io_cpu_store_pending_T_5 ? {atomics_a_mask_sub_3_1 | atomics_a_mask_sub_3_2 & s2_req_addr[0], atomics_a_mask_sub_3_1 | atomics_a_mask_sub_3_2 & ~(s2_req_addr[0]), atomics_a_mask_sub_2_1 | atomics_a_mask_sub_2_2 & s2_req_addr[0], atomics_a_mask_sub_2_1 | atomics_a_mask_sub_2_2 & ~(s2_req_addr[0]), atomics_a_mask_sub_1_1 | atomics_a_mask_sub_1_2 & s2_req_addr[0], atomics_a_mask_sub_1_1 | atomics_a_mask_sub_1_2 & ~(s2_req_addr[0]), atomics_a_mask_sub_0_1 | atomics_a_mask_sub_0_2 & s2_req_addr[0], atomics_a_mask_sub_0_1 | atomics_a_mask_sub_0_2 & ~(s2_req_addr[0])} : 8'h0; // @[package.scala:16:47] wire tl_out_a_valid = s2_valid_uncached_pending | s2_valid_cached_miss & ~(release_ack_wait & (s2_req_addr[20:6] ^ release_ack_addr[20:6]) == 15'h0) & ~s2_victim_dirty; // @[Misc.scala:38:9] wire _GEN_24 = ~s2_write | _io_cpu_store_pending_T_1 | ~s2_read | _io_cpu_store_pending_T_16 | _io_cpu_store_pending_T_15 | _io_cpu_store_pending_T_14 | _io_cpu_store_pending_T_13 | _io_cpu_store_pending_T_12 | _io_cpu_store_pending_T_8 | _io_cpu_store_pending_T_7 | _io_cpu_store_pending_T_6 | _io_cpu_store_pending_T_5; // @[package.scala:16:47, :81:59] wire _io_errors_bus_valid_T = nodeOut_d_ready & auto_out_d_valid; // @[Decoupled.scala:51:35] wire [26:0] _r_beats1_decode_T = 27'hFFF << auto_out_d_bits_size; // @[package.scala:243:71] wire [8:0] r_beats1 = auto_out_d_bits_opcode[0] ? 
~(_r_beats1_decode_T[11:3]) : 9'h0; // @[package.scala:243:{46,71,76}] reg [8:0] r_counter; // @[Edges.scala:229:27] wire [8:0] _r_counter1_T = r_counter - 9'h1; // @[Edges.scala:229:27, :230:28] wire d_last = r_counter == 9'h1 | r_beats1 == 9'h0; // @[Edges.scala:221:14, :229:27, :232:{25,33,43}] wire [8:0] r_4 = r_beats1 & ~_r_counter1_T; // @[Edges.scala:221:14, :230:28, :234:{25,27}] wire grantIsUncachedData = auto_out_d_bits_opcode == 3'h1; // @[package.scala:16:47] wire grantIsUncached = grantIsUncachedData | auto_out_d_bits_opcode == 3'h0 | auto_out_d_bits_opcode == 3'h2; // @[package.scala:16:47, :81:59] wire grantIsRefill = auto_out_d_bits_opcode == 3'h5; // @[package.scala:16:47] wire grantIsCached = auto_out_d_bits_opcode == 3'h4 | grantIsRefill; // @[package.scala:16:47, :81:59] wire grantIsVoluntary = auto_out_d_bits_opcode == 3'h6; // @[DCache.scala:665:32] reg grantInProgress; // @[DCache.scala:667:32] reg [2:0] blockProbeAfterGrantCount; // @[DCache.scala:668:42] wire _metaArb_io_in_4_valid_T = release_state == 4'h6; // @[package.scala:16:47] wire _nodeOut_c_valid_T_1 = release_state == 4'h9; // @[package.scala:16:47] wire _canAcceptCachedGrant_T_4 = _canAcceptCachedGrant_T | _metaArb_io_in_4_valid_T | _nodeOut_c_valid_T_1; // @[package.scala:16:47, :81:59] wire _GEN_25 = _io_errors_bus_valid_T & grantIsCached; // @[Decoupled.scala:51:35] wire _GEN_26 = auto_out_d_bits_source & d_last; // @[Edges.scala:232:33] wire _GEN_27 = grantIsRefill & dataArb_io_in_0_valid; // @[package.scala:16:47] wire nodeOut_e_valid = ~_GEN_27 & auto_out_d_valid & ~(|r_counter) & grantIsCached & ~_canAcceptCachedGrant_T_4; // @[package.scala:81:59] assign dataArb_io_in_1_bits_addr = {s2_req_addr[11:6] | r_4[8:3], r_4[2:0], 3'h0}; // @[Edges.scala:234:25] assign metaArb_io_in_3_valid = grantIsCached & d_last & _io_errors_bus_valid_T & ~auto_out_d_bits_denied; // @[Decoupled.scala:51:35] wire [3:0] _metaArb_io_in_3_bits_data_T_1 = {_io_cpu_store_pending_T | _io_cpu_store_pending_T_1 | s2_sc | _io_cpu_store_pending_T_5 | _io_cpu_store_pending_T_6 | _io_cpu_store_pending_T_7 | _io_cpu_store_pending_T_8 | _io_cpu_store_pending_T_12 | _io_cpu_store_pending_T_13 | _io_cpu_store_pending_T_14 | _io_cpu_store_pending_T_15 | _io_cpu_store_pending_T_16, _io_cpu_store_pending_T | _io_cpu_store_pending_T_1 | s2_sc | _io_cpu_store_pending_T_5 | _io_cpu_store_pending_T_6 | _io_cpu_store_pending_T_7 | _io_cpu_store_pending_T_8 | _io_cpu_store_pending_T_12 | _io_cpu_store_pending_T_13 | _io_cpu_store_pending_T_14 | _io_cpu_store_pending_T_15 | _io_cpu_store_pending_T_16 | _metaArb_io_in_3_bits_data_c_cat_T_46 | s2_lr, auto_out_d_bits_param}; // @[package.scala:16:47, :81:59] assign metaArb_io_in_3_bits_data = {_metaArb_io_in_3_bits_data_T_1 == 4'hC ? 2'h3 : _metaArb_io_in_3_bits_data_T_1 == 4'h4 | _metaArb_io_in_3_bits_data_T_1 == 4'h0 ? 2'h2 : {1'h0, _metaArb_io_in_3_bits_data_T_1 == 4'h1}, s2_req_addr[31:12]}; // @[Metadata.scala:84:{18,38}] reg blockUncachedGrant; // @[DCache.scala:750:33] wire _GEN_28 = grantIsUncachedData & (blockUncachedGrant | s1_valid); // @[package.scala:16:47] assign nodeOut_d_ready = ~(_GEN_28 | _GEN_27) & (~grantIsCached | ((|r_counter) | auto_out_e_ready) & ~_canAcceptCachedGrant_T_4); // @[package.scala:81:59] wire io_cpu_req_ready_0 = _GEN_28 ? 
~(auto_out_d_valid | _GEN_17 | metaArb__grant_T_5 | _GEN_16) & _io_cpu_req_ready_T_4 : ~(_GEN_17 | metaArb__grant_T_5 | _GEN_16) & _io_cpu_req_ready_T_4; // @[Arbiter.scala:45:68] wire _GEN_29 = _GEN_28 & auto_out_d_valid; // @[DCache.scala:721:26, :752:{31,68}, :755:29, :757:32] assign dataArb_io_in_1_valid = _GEN_29 | auto_out_d_valid & grantIsRefill & ~_canAcceptCachedGrant_T_4; // @[package.scala:16:47, :81:59] assign dataArb_io_in_1_bits_write = ~_GEN_28 | ~auto_out_d_valid; // @[DCache.scala:727:33, :752:{31,68}, :755:29, :758:37] wire block_probe_for_core_progress = (|blockProbeAfterGrantCount) | (|(lrscCount[6:2])); // @[DCache.scala:472:26, :473:29, :668:42, :669:35, :766:71] wire nodeOut_b_ready = ~metaArb__grant_T_3 & ~(block_probe_for_core_progress | (|_GEN_19) | release_ack_wait & (auto_out_b_bits_address[20:6] ^ release_ack_addr[20:6]) == 15'h0 | grantInProgress | s1_valid | s2_valid); // @[Arbiter.scala:45:{68,78}] wire _io_cpu_perf_release_T = auto_out_c_ready & nodeOut_c_valid; // @[Decoupled.scala:51:35] wire [26:0] _io_cpu_perf_release_beats1_decode_T = 27'hFFF << nodeOut_c_bits_size; // @[package.scala:243:71] wire [8:0] r_beats1_1 = nodeOut_c_bits_opcode[0] ? ~(_io_cpu_perf_release_beats1_decode_T[11:3]) : 9'h0; // @[package.scala:243:{46,71,76}] reg [8:0] r_counter_1; // @[Edges.scala:229:27] wire [8:0] _r_counter1_T_1 = r_counter_1 - 9'h1; // @[Edges.scala:229:27, :230:28] wire c_first = r_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25] wire releaseDone = (r_counter_1 == 9'h1 | r_beats1_1 == 9'h0) & _io_cpu_perf_release_T; // @[Decoupled.scala:51:35] reg s1_release_data_valid; // @[DCache.scala:801:38] reg s2_release_data_valid; // @[DCache.scala:802:38] wire releaseRejected = s2_release_data_valid & ~_io_cpu_perf_release_T; // @[Decoupled.scala:51:35] wire [9:0] _releaseDataBeat_T_5 = {1'h0, r_beats1_1 & ~_r_counter1_T_1} + {8'h0, releaseRejected ? 2'h0 : {1'h0, s1_release_data_valid} + {1'h0, s2_release_data_valid}}; // @[Edges.scala:221:14, :230:28, :234:{25,27}] assign s1_nack = s2_probe ? s2_prb_ack_data | (|s2_probe_state_state) | ~releaseDone | _GEN_23 | _GEN_22 | _GEN_18 : _GEN_23 | _GEN_22 | _GEN_18; // @[Misc.scala:38:9] wire _GEN_30 = release_state == 4'h4; // @[DCache.scala:228:30, :841:25] assign metaArb_io_in_6_valid = _GEN_30 | auto_out_b_valid & (~block_probe_for_core_progress | (|lrscCount) & lrscCount < 7'h4); // @[DCache.scala:472:26, :473:29, :474:{34,40,43}, :766:71, :769:{26,44,48,79}, :841:{25,44}, :842:30] assign metaArb_io_in_6_bits_idx = _GEN_30 ? metaArb_io_in_4_bits_idx : auto_out_b_bits_address[11:6]; // @[DCache.scala:772:29, :841:{25,44}, :843:33, :1200:47] wire _GEN_31 = release_state == 4'h5; // @[DCache.scala:228:30, :850:25] wire _GEN_32 = release_state == 4'h3; // @[DCache.scala:228:30, :854:25] assign nodeOut_c_valid = _GEN_32 | _GEN_31 | s2_probe & ~s2_prb_ack_data | s2_release_data_valid & ~(c_first & release_ack_wait); // @[Misc.scala:38:9] wire _GEN_33 = _canAcceptCachedGrant_T | _metaArb_io_in_4_valid_T | _nodeOut_c_valid_T_1; // @[package.scala:16:47, :81:59] assign nodeOut_c_bits_opcode = _GEN_33 ? {2'h3, ~_nodeOut_c_valid_T_1} : {2'h2, _inWriteback_T_1}; // @[package.scala:16:47, :81:59] assign nodeOut_c_bits_size = _GEN_33 ? 4'h6 : probe_bits_size; // @[package.scala:81:59] assign releaseWay = _GEN_33 ? 
s2_victim_or_hit_way : s2_probe_way; // @[package.scala:81:59] assign dataArb_io_in_2_valid = inWriteback & _releaseDataBeat_T_5 < 10'h8; // @[package.scala:81:59] assign dataArb_io_in_2_bits_addr = {metaArb_io_in_4_bits_idx, _releaseDataBeat_T_5[2:0], 3'h0}; // @[DCache.scala:804:43, :903:{72,90}, :1200:47] assign metaArb_io_in_4_valid = _metaArb_io_in_4_valid_T | release_state == 4'h7; // @[package.scala:16:47, :81:59] assign metaArb_io_in_7_bits_data = {_GEN_33 ? 2'h0 : _GEN_21 ? 2'h2 : _r_T_108 ? 2'h1 : _r_T_104 ? 2'h0 : {1'h0, _r_T_100 | _r_T_96 | _r_T_92}, probe_bits_address[31:12]}; // @[package.scala:81:59] reg io_cpu_s2_xcpt_REG; // @[DCache.scala:933:32] assign io_cpu_s2_xcpt_pf_ld_0 = io_cpu_s2_xcpt_REG & s2_tlb_xcpt_pf_ld; // @[DCache.scala:342:24, :933:{24,32}] assign io_cpu_s2_xcpt_pf_st_0 = io_cpu_s2_xcpt_REG & s2_tlb_xcpt_pf_st; // @[DCache.scala:342:24, :933:{24,32}] assign io_cpu_s2_xcpt_ae_ld_0 = io_cpu_s2_xcpt_REG & s2_tlb_xcpt_ae_ld; // @[DCache.scala:342:24, :933:{24,32}] assign io_cpu_s2_xcpt_ae_st_0 = io_cpu_s2_xcpt_REG & s2_tlb_xcpt_ae_st; // @[DCache.scala:342:24, :933:{24,32}] assign io_cpu_s2_xcpt_ma_ld_0 = io_cpu_s2_xcpt_REG & s2_tlb_xcpt_ma_ld; // @[DCache.scala:342:24, :933:{24,32}] assign io_cpu_s2_xcpt_ma_st_0 = io_cpu_s2_xcpt_REG & s2_tlb_xcpt_ma_st; // @[DCache.scala:342:24, :933:{24,32}] reg doUncachedResp; // @[DCache.scala:948:31] wire io_cpu_replay_next_0 = _io_errors_bus_valid_T & grantIsUncachedData; // @[Decoupled.scala:51:35]
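A note on the d-channel beat bookkeeping generated above: r_beats1, r_counter, and d_last implement the usual TileLink beat-counter idiom, in which a data-bearing D opcode (opcode bit 0 set) carries 2^size / beatBytes beats, a down-counter is reloaded with beats1 on the first beat of each message, and the last beat is flagged when the counter reaches 1 (or immediately for single-beat messages). The stand-alone Chisel sketch below illustrates that idiom only; the module name, ports, and the 8-byte-beat assumption are illustrative and are not the rocket-chip Edges.scala code that produced this Verilog.

import chisel3._

// Hypothetical sketch of the beat-counter idiom (not the rocket-chip implementation).
class BeatCounterSketch extends Module {
  val io = IO(new Bundle {
    val size    = Input(UInt(3.W))  // log2(total bytes) of the message
    val hasData = Input(Bool())     // only data-bearing opcodes are multi-beat
    val fire    = Input(Bool())     // a beat is accepted this cycle
    val first   = Output(Bool())    // first beat of a message
    val last    = Output(Bool())    // final beat (meaningful while a beat is presented)
  })
  // beats1 = beats - 1 = (2^size / 8) - 1 for 8-byte beats, computed as ((1 << size) - 1) >> 3
  val decode = ((1.U << io.size) - 1.U) >> 3
  val beats1 = Mux(io.hasData, decode, 0.U)
  // Down-counter of remaining beats; zero means "between messages"
  val counter  = RegInit(0.U(5.W))
  val counter1 = counter - 1.U
  io.first := counter === 0.U
  io.last  := counter === 1.U || beats1 === 0.U
  when (io.fire) { counter := Mux(io.first, beats1, counter1) }
}

As a sanity check against the generated code: a 64-byte data message (size = 6) over an 8-byte bus gives beats1 = 64/8 - 1 = 7, which is the same value r_beats1 recovers above as ~((27'hFFF << 6)[11:3]) = 9'h7.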
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed version of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message " + "is already pending (not received until current cycle) for a prior request message " + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the " + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the " + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request " + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding " + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
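// Editor's note (not in the original source): the fold below builds a barrel rotator.
// The rotate amount is zero-padded to log2Ceil(width) bits, and stage i conditionally
// applies a fixed rotate-by-2^i selected by bit i of that amount, so log2Ceil(width)
// mux stages realize any dynamic rotate amount.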
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Bundles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import freechips.rocketchip.util._ import scala.collection.immutable.ListMap import chisel3.util.Decoupled import chisel3.util.DecoupledIO import chisel3.reflect.DataMirror abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle // common combos in lazy policy: // Put + Acquire // Release + AccessAck object TLMessages { // A B C D E def PutFullData = 0.U // . . => AccessAck def PutPartialData = 1.U // . . => AccessAck def ArithmeticData = 2.U // . . => AccessAckData def LogicalData = 3.U // . . => AccessAckData def Get = 4.U // . . => AccessAckData def Hint = 5.U // . . => HintAck def AcquireBlock = 6.U // . => Grant[Data] def AcquirePerm = 7.U // . => Grant[Data] def Probe = 6.U // . => ProbeAck[Data] def AccessAck = 0.U // . . def AccessAckData = 1.U // . . def HintAck = 2.U // . . def ProbeAck = 4.U // . def ProbeAckData = 5.U // . def Release = 6.U // . => ReleaseAck def ReleaseData = 7.U // . => ReleaseAck def Grant = 4.U // . => GrantAck def GrantData = 5.U // . => GrantAck def ReleaseAck = 6.U // . def GrantAck = 0.U // . def isA(x: UInt) = x <= AcquirePerm def isB(x: UInt) = x <= Probe def isC(x: UInt) = x <= ReleaseData def isD(x: UInt) = x <= ReleaseAck def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant) def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck) def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("AcquireBlock",TLPermissions.PermMsgGrow), ("AcquirePerm",TLPermissions.PermMsgGrow)) def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("Probe",TLPermissions.PermMsgCap)) def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("ProbeAck",TLPermissions.PermMsgReport), ("ProbeAckData",TLPermissions.PermMsgReport), ("Release",TLPermissions.PermMsgReport), ("ReleaseData",TLPermissions.PermMsgReport)) def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("Grant",TLPermissions.PermMsgCap), ("GrantData",TLPermissions.PermMsgCap), ("ReleaseAck",TLPermissions.PermMsgReserved)) } /** * The three primary TileLink permissions are: * (T)runk: the agent is (or is on inwards path to) the global point of 
serialization.
 *   (B)ranch: the agent is on an outwards path to
 *   (N)one:
 * These permissions are permuted by transfer operations in various ways.
 * Operations can cap permissions, request for them to be grown or shrunk,
 * or for a report on their current status.
 */
object TLPermissions
{
  val aWidth = 2
  val bdWidth = 2
  val cWidth = 3

  // Cap types (Grant = new permissions, Probe = permissions <= target)
  def toT = 0.U(bdWidth.W)
  def toB = 1.U(bdWidth.W)
  def toN = 2.U(bdWidth.W)
  def isCap(x: UInt) = x <= toN

  // Grow types (Acquire = permissions >= target)
  def NtoB = 0.U(aWidth.W)
  def NtoT = 1.U(aWidth.W)
  def BtoT = 2.U(aWidth.W)
  def isGrow(x: UInt) = x <= BtoT

  // Shrink types (ProbeAck, Release)
  def TtoB = 0.U(cWidth.W)
  def TtoN = 1.U(cWidth.W)
  def BtoN = 2.U(cWidth.W)
  def isShrink(x: UInt) = x <= BtoN

  // Report types (ProbeAck, Release)
  def TtoT = 3.U(cWidth.W)
  def BtoB = 4.U(cWidth.W)
  def NtoN = 5.U(cWidth.W)
  def isReport(x: UInt) = x <= NtoN

  def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
  def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
  def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
  def PermMsgReserved:Seq[String] = Seq("Reserved")
}

object TLAtomics
{
  val width = 3

  // Arithmetic types
  def MIN = 0.U(width.W)
  def MAX = 1.U(width.W)
  def MINU = 2.U(width.W)
  def MAXU = 3.U(width.W)
  def ADD = 4.U(width.W)
  def isArithmetic(x: UInt) = x <= ADD

  // Logical types
  def XOR = 0.U(width.W)
  def OR = 1.U(width.W)
  def AND = 2.U(width.W)
  def SWAP = 3.U(width.W)
  def isLogical(x: UInt) = x <= SWAP

  def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
  def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}

object TLHints
{
  val width = 1

  def PREFETCH_READ = 0.U(width.W)
  def PREFETCH_WRITE = 1.U(width.W)
  def isHints(x: UInt) = x <= PREFETCH_WRITE

  def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}

sealed trait TLChannel extends TLBundleBase {
  val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel

final class TLBundleA(params: TLBundleParameters)
  extends TLBundleBase(params) with TLAddrChannel
{
  override def typeName = s"TLBundleA_${params.shortName}"
  val channelName = "'A' channel"
  // fixed fields during multibeat:
  val opcode = UInt(3.W)
  val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
  val size = UInt(params.sizeBits.W)
  val source = UInt(params.sourceBits.W) // from
  val address = UInt(params.addressBits.W) // to
  val user = BundleMap(params.requestFields)
  val echo = BundleMap(params.echoFields)
  // variable fields during multibeat:
  val mask = UInt((params.dataBits/8).W)
  val data = UInt(params.dataBits.W)
  val corrupt = Bool() // only applies to *Data messages
}

final class TLBundleB(params: TLBundleParameters)
  extends TLBundleBase(params) with TLAddrChannel
{
  override def typeName = s"TLBundleB_${params.shortName}"
  val channelName = "'B' channel"
  // fixed fields during multibeat:
  val opcode = UInt(3.W)
  val param = UInt(TLPermissions.bdWidth.W) // cap perms
  val size = UInt(params.sizeBits.W)
  val source = UInt(params.sourceBits.W) // to
  val address = UInt(params.addressBits.W) // from
  // variable fields during multibeat:
  val mask = UInt((params.dataBits/8).W)
  val data = UInt(params.dataBits.W)
  val corrupt = Bool() // only applies to *Data messages
}

final class TLBundleC(params: TLBundleParameters) extends TLBundleBase(params) with
TLAddrChannel { override def typeName = s"TLBundleC_${params.shortName}" val channelName = "'C' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.cWidth.W) // shrink or report perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleD(params: TLBundleParameters) extends TLBundleBase(params) with TLDataChannel { override def typeName = s"TLBundleD_${params.shortName}" val channelName = "'D' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val sink = UInt(params.sinkBits.W) // from val denied = Bool() // implies corrupt iff *Data val user = BundleMap(params.responseFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleE(params: TLBundleParameters) extends TLBundleBase(params) with TLChannel { override def typeName = s"TLBundleE_${params.shortName}" val channelName = "'E' channel" val sink = UInt(params.sinkBits.W) // to } class TLBundle(val params: TLBundleParameters) extends Record { // Emulate a Bundle with elements abcde or ad depending on params.hasBCE private val optA = Some (Decoupled(new TLBundleA(params))) private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params)))) private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params))) private val optD = Some (Flipped(Decoupled(new TLBundleD(params)))) private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params))) def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params))))) def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params))))) def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params))))) def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params))))) def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params))))) val elements = if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a) else ListMap("d" -> d, "a" -> a) def tieoff(): Unit = { DataMirror.specifiedDirectionOf(a.ready) match { case SpecifiedDirection.Input => a.ready := false.B c.ready := false.B e.ready := false.B b.valid := false.B d.valid := false.B case SpecifiedDirection.Output => a.valid := false.B c.valid := false.B e.valid := false.B b.ready := false.B d.ready := false.B case _ => } } } object TLBundle { def apply(params: TLBundleParameters) = new TLBundle(params) } class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params) { val a = new AsyncBundle(new TLBundleA(params.base), params.async) val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async)) val c = new AsyncBundle(new TLBundleC(params.base), params.async) val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async)) val e = new AsyncBundle(new 
TLBundleE(params.base), params.async) } class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = RationalIO(new TLBundleA(params)) val b = Flipped(RationalIO(new TLBundleB(params))) val c = RationalIO(new TLBundleC(params)) val d = Flipped(RationalIO(new TLBundleD(params))) val e = RationalIO(new TLBundleE(params)) } class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = CreditedIO(new TLBundleA(params)) val b = Flipped(CreditedIO(new TLBundleB(params))) val c = CreditedIO(new TLBundleC(params)) val d = Flipped(CreditedIO(new TLBundleD(params))) val e = CreditedIO(new TLBundleE(params)) } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // An potentially empty inclusive range of 2-powers 
[min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
  def this(x: Int) = this(x, x)

  require (min <= max, s"Min transfer $min > max transfer $max")
  require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
  require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
  require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
  require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")

  def none = min == 0
  def contains(x: Int) = isPow2(x) && min <= x && x <= max
  def containsLg(x: Int) = contains(1 << x)
  def containsLg(x: UInt) =
    if (none) false.B
    else if (min == max) { log2Ceil(min).U === x }
    else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }

  def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)

  def intersect(x: TransferSizes) =
    if (x.max < min || max < x.min) TransferSizes.none
    else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))

  // Not a union, because the result may contain sizes contained by neither term
  // NOT TO BE CONFUSED WITH COVERPOINTS
  def mincover(x: TransferSizes) = {
    if (none) {
      x
    } else if (x.none) {
      this
    } else {
      TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
    }
  }

  override def toString() = "TransferSizes[%d, %d]".format(min, max)
}

object TransferSizes {
  def apply(x: Int) = new TransferSizes(x)
  val none = new TransferSizes(0)

  def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
  def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)

  implicit def asBool(x: TransferSizes) = !x.none
}

// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
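// Illustrative sketch (editor's addition, not part of the original file): the base/mask
// containment and overlap algebra described above can be exercised at elaboration time, e.g.
//   val dev = AddressSet(0x200, 0xff)               // covers 0x200-0x2ff
//   require(dev.contains(BigInt(0x2a0)))            // inside the window
//   require(!dev.contains(BigInt(0x300)))           // just past it
//   require(dev.overlaps(AddressSet(0x280, 0x7f)))  // 0x280-0x2ff overlaps
//   require(AddressSet(0x1000, 0xf0f).contains(BigInt(0x1103))) // strided match: masked bits are don't-care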
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
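// (Editor's note, not in the original source) The per-channel Yes/No flags computed below
// answer these questions from the static port parameters, so staticHasData can return
// Some(true)/Some(false) whenever only one answer is possible and hasData() further down
// constant-folds instead of decoding the opcode; e.g. an edge whose managers support only
// Get yields Some(false) for channel A.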
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => 
d.param
    }
  }

  def size(x: TLDataChannel): UInt = {
    x match {
      case a: TLBundleA => a.size
      case b: TLBundleB => b.size
      case c: TLBundleC => c.size
      case d: TLBundleD => d.size
    }
  }

  def data(x: TLDataChannel): UInt = {
    x match {
      case a: TLBundleA => a.data
      case b: TLBundleB => b.data
      case c: TLBundleC => c.data
      case d: TLBundleD => d.data
    }
  }

  def corrupt(x: TLDataChannel): Bool = {
    x match {
      case a: TLBundleA => a.corrupt
      case b: TLBundleB => b.corrupt
      case c: TLBundleC => c.corrupt
      case d: TLBundleD => d.corrupt
    }
  }

  def mask(x: TLAddrChannel): UInt = {
    x match {
      case a: TLBundleA => a.mask
      case b: TLBundleB => b.mask
      case c: TLBundleC => mask(c.address, c.size)
    }
  }

  def full_mask(x: TLAddrChannel): UInt = {
    x match {
      case a: TLBundleA => mask(a.address, a.size)
      case b: TLBundleB => mask(b.address, b.size)
      case c: TLBundleC => mask(c.address, c.size)
    }
  }

  def address(x: TLAddrChannel): UInt = {
    x match {
      case a: TLBundleA => a.address
      case b: TLBundleB => b.address
      case c: TLBundleC => c.address
    }
  }

  def source(x: TLDataChannel): UInt = {
    x match {
      case a: TLBundleA => a.source
      case b: TLBundleB => b.source
      case c: TLBundleC => c.source
      case d: TLBundleD => d.source
    }
  }

  def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
  def addr_lo(x: UInt): UInt =
    if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)

  def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
  def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))

  def numBeats(x: TLChannel): UInt = {
    x match {
      case _: TLBundleE => 1.U
      case bundle: TLDataChannel => {
        val hasData = this.hasData(bundle)
        val size = this.size(bundle)
        val cutoff = log2Ceil(manager.beatBytes)
        val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
        val decode = UIntToOH(size, maxLgSize+1) >> cutoff
        Mux(hasData, decode | small.asUInt, 1.U)
      }
    }
  }

  def numBeats1(x: TLChannel): UInt = {
    x match {
      case _: TLBundleE => 0.U
      case bundle: TLDataChannel => {
        if (maxLgSize == 0) {
          0.U
        } else {
          val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
          Mux(hasData(bundle), decode, 0.U)
        }
      }
    }
  }

  def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
    val beats1   = numBeats1(bits)
    val counter  = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
    val counter1 = counter - 1.U
    val first = counter === 0.U
    val last  = counter === 1.U || beats1 === 0.U
    val done  = last && fire
    val count = (beats1 & ~counter1)
    when (fire) {
      counter := Mux(first, beats1, counter1)
    }
    (first, last, done, count)
  }

  def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
  def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
  def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)

  def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
  def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
  def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)

  def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
  def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
  def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)

  def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
    val r = firstlastHelper(bits, fire)
    (r._1, r._2, r._3)
  }
  def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
  def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)

  def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
    val r = firstlastHelper(bits, fire)
    (r._1, r._2, r._3, r._4)
  }
  def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
  def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)

  def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
    val r = firstlastHelper(bits, fire)
    (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
  }
  def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
  def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)

  // Does the request need T permissions to be executed?
  def needT(a: TLBundleA): Bool = {
    val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
      TLPermissions.NtoB -> false.B,
      TLPermissions.NtoT -> true.B,
      TLPermissions.BtoT -> true.B))
    MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
      TLMessages.PutFullData    -> true.B,
      TLMessages.PutPartialData -> true.B,
      TLMessages.ArithmeticData -> true.B,
      TLMessages.LogicalData    -> true.B,
      TLMessages.Get            -> false.B,
      TLMessages.Hint           -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
        TLHints.PREFETCH_READ  -> false.B,
        TLHints.PREFETCH_WRITE -> true.B)),
      TLMessages.AcquireBlock -> acq_needT,
      TLMessages.AcquirePerm  -> acq_needT))
  }

  // This is a very expensive circuit; use only if you really mean it!
  def inFlight(x: TLBundle): (UInt, UInt) = {
    val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
    val bce = manager.anySupportAcquireB && client.anySupportProbe

    val (a_first, a_last, _) = firstlast(x.a)
    val (b_first, b_last, _) = firstlast(x.b)
    val (c_first, c_last, _) = firstlast(x.c)
    val (d_first, d_last, _) = firstlast(x.d)
    val (e_first, e_last, _) = firstlast(x.e)

    val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
    val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
    val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
    val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
    val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))

    val a_inc = x.a.fire && a_first && a_request
    val b_inc = x.b.fire && b_first && b_request
    val c_inc = x.c.fire && c_first && c_request
    val d_inc = x.d.fire && d_first && d_request
    val e_inc = x.e.fire && e_first && e_request
    val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))

    val a_dec = x.a.fire && a_last && a_response
    val b_dec = x.b.fire && b_last && b_response
    val c_dec = x.c.fire && c_last && c_response
    val d_dec = x.d.fire && d_last && d_response
    val e_dec = x.e.fire && e_last && e_response
    val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))

    val next_flight = flight + PopCount(inc) - PopCount(dec)
    flight := next_flight

    (flight, next_flight)
  }

  def prettySourceMapping(context: String): String = {
    s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
  }
}

class TLEdgeOut(
  client:  TLClientPortParameters,
  manager: TLManagerPortParameters,
  params:  Parameters,
  sourceInfo: SourceInfo)
  extends TLEdge(client, manager, params, sourceInfo)
{
  // Transfers
  def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
    require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsAcquireBFast(toAddress, lgSize)
    val a = Wire(new TLBundleA(bundle))
    a.opcode := TLMessages.AcquireBlock
    a.param := growPermissions
    a.size := lgSize
    a.source := fromSource
    a.address := toAddress
    a.user := DontCare
    a.echo := DontCare
    a.mask := mask(toAddress, lgSize)
    a.data := DontCare
    a.corrupt := false.B
    (legal, a)
  }

  def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
    require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsAcquireBFast(toAddress, lgSize)
    val a = Wire(new TLBundleA(bundle))
    a.opcode := TLMessages.AcquirePerm
    a.param := growPermissions
    a.size := lgSize
    a.source := fromSource
    a.address := toAddress
    a.user := DontCare
    a.echo := DontCare
    a.mask := mask(toAddress, lgSize)
    a.data := DontCare
    a.corrupt := false.B
    (legal, a)
  }

  def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
    require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsAcquireBFast(toAddress, lgSize)
    val c = Wire(new TLBundleC(bundle))
    c.opcode := TLMessages.Release
    c.param := shrinkPermissions
    c.size := lgSize
    c.source := fromSource
    c.address := toAddress
    c.user := DontCare
    c.echo := DontCare
    c.data := DontCare
    c.corrupt := false.B
    (legal, c)
  }

  def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
    require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsAcquireBFast(toAddress, lgSize)
    val c = Wire(new TLBundleC(bundle))
    c.opcode := TLMessages.ReleaseData
    c.param := shrinkPermissions
    c.size := lgSize
    c.source := fromSource
    c.address := toAddress
    c.user := DontCare
    c.echo := DontCare
    c.data := data
    c.corrupt := corrupt
    (legal, c)
  }

  def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
    Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)

  def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
    ProbeAck(b.source, b.address, b.size, reportPermissions)

  def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
    val c = Wire(new TLBundleC(bundle))
    c.opcode := TLMessages.ProbeAck
    c.param := reportPermissions
    c.size := lgSize
    c.source := fromSource
    c.address := toAddress
    c.user := DontCare
    c.echo := DontCare
    c.data := DontCare
    c.corrupt := false.B
    c
  }

  def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
    ProbeAck(b.source, b.address, b.size, reportPermissions, data)

  def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
    val c = Wire(new TLBundleC(bundle))
    c.opcode := TLMessages.ProbeAckData
    c.param := reportPermissions
    c.size := lgSize
    c.source := fromSource
    c.address := toAddress
    c.user := DontCare
    c.echo := DontCare
    c.data := data
    c.corrupt := corrupt
    c
  }

  def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
    ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)

  def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
  def GrantAck(toSink: UInt): TLBundleE = {
    val e = Wire(new TLBundleE(bundle))
    e.sink := toSink
    e
  }

  // Accesses
  def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
    require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsGetFast(toAddress, lgSize)
    val a = Wire(new TLBundleA(bundle))
    a.opcode := TLMessages.Get
    a.param := 0.U
    a.size := lgSize
    a.source := fromSource
    a.address := toAddress
    a.user := DontCare
    a.echo := DontCare
    a.mask := mask(toAddress, lgSize)
    a.data := DontCare
    a.corrupt := false.B
    (legal, a)
  }

  def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
    Put(fromSource, toAddress, lgSize, data, false.B)

  def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
    require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsPutFullFast(toAddress, lgSize)
    val a = Wire(new TLBundleA(bundle))
    a.opcode := TLMessages.PutFullData
    a.param := 0.U
    a.size := lgSize
    a.source := fromSource
    a.address := toAddress
    a.user := DontCare
    a.echo := DontCare
    a.mask := mask(toAddress, lgSize)
    a.data := data
    a.corrupt := corrupt
    (legal, a)
  }

  def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
    Put(fromSource, toAddress, lgSize, data, mask, false.B)

  def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
    require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsPutPartialFast(toAddress, lgSize)
    val a = Wire(new TLBundleA(bundle))
    a.opcode := TLMessages.PutPartialData
    a.param := 0.U
    a.size := lgSize
    a.source := fromSource
    a.address := toAddress
    a.user := DontCare
    a.echo := DontCare
    a.mask := mask
    a.data := data
    a.corrupt := corrupt
    (legal, a)
  }

  def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
    require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsArithmeticFast(toAddress, lgSize)
    val a = Wire(new TLBundleA(bundle))
    a.opcode := TLMessages.ArithmeticData
    a.param := atomic
    a.size := lgSize
    a.source := fromSource
    a.address := toAddress
    a.user := DontCare
    a.echo := DontCare
    a.mask := mask(toAddress, lgSize)
    a.data := data
    a.corrupt := corrupt
    (legal, a)
  }

  def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
    require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsLogicalFast(toAddress, lgSize)
    val a = Wire(new TLBundleA(bundle))
    a.opcode := TLMessages.LogicalData
    a.param := atomic
    a.size := lgSize
    a.source := fromSource
    a.address := toAddress
    a.user := DontCare
    a.echo := DontCare
    a.mask := mask(toAddress, lgSize)
    a.data := data
    a.corrupt := corrupt
    (legal, a)
  }

  def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
    require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsHintFast(toAddress, lgSize)
    val a = Wire(new TLBundleA(bundle))
    a.opcode := TLMessages.Hint
    a.param := param
    a.size := lgSize
    a.source := fromSource
    a.address := toAddress
    a.user := DontCare
    a.echo := DontCare
    a.mask := mask(toAddress, lgSize)
    a.data := DontCare
    a.corrupt := false.B
    (legal, a)
  }

  def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
  def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
    val c = Wire(new TLBundleC(bundle))
    c.opcode := TLMessages.AccessAck
    c.param := 0.U
    c.size := lgSize
    c.source := fromSource
    c.address := toAddress
    c.user := DontCare
    c.echo := DontCare
    c.data := DontCare
    c.corrupt := false.B
    c
  }

  def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
  def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
  def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
  def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
    val c = Wire(new TLBundleC(bundle))
    c.opcode := TLMessages.AccessAckData
    c.param := 0.U
    c.size := lgSize
    c.source := fromSource
    c.address := toAddress
    c.user := DontCare
    c.echo := DontCare
    c.data := data
    c.corrupt := corrupt
    c
  }

  def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
  def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
    val c = Wire(new TLBundleC(bundle))
    c.opcode := TLMessages.HintAck
    c.param := 0.U
    c.size := lgSize
    c.source := fromSource
    c.address := toAddress
    c.user := DontCare
    c.echo := DontCare
    c.data := DontCare
    c.corrupt := false.B
    c
  }
}

class TLEdgeIn(
  client:  TLClientPortParameters,
  manager: TLManagerPortParameters,
  params:  Parameters,
  sourceInfo: SourceInfo)
  extends TLEdge(client, manager, params, sourceInfo)
{
  private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
    val todo = x.filter(!_.isEmpty)
    val heads = todo.map(_.head)
    val tails = todo.map(_.tail)
    if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
  }

  // Transfers
  def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
    require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
    val legal = client.supportsProbe(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode := TLMessages.Probe
    b.param := capPermissions
    b.size := lgSize
    b.source := toSource
    b.address := fromAddress
    b.mask := mask(fromAddress, lgSize)
    b.data := DontCare
    b.corrupt := false.B
    (legal, b)
  }

  def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD =
    Grant(fromSink, toSource, lgSize, capPermissions, false.B)

  def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
    val d = Wire(new TLBundleD(bundle))
    d.opcode := TLMessages.Grant
    d.param := capPermissions
    d.size := lgSize
    d.source := toSource
    d.sink := fromSink
    d.denied := denied
    d.user := DontCare
    d.echo := DontCare
    d.data := DontCare
    d.corrupt := false.B
    d
  }

  def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD =
    Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)

  def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
    val d = Wire(new TLBundleD(bundle))
    d.opcode := TLMessages.GrantData
    d.param := capPermissions
    d.size := lgSize
    d.source := toSource
    d.sink := fromSink
    d.denied := denied
    d.user := DontCare
    d.echo := DontCare
    d.data := data
    d.corrupt := corrupt
    d
  }

  def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
  def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
    val d = Wire(new TLBundleD(bundle))
    d.opcode := TLMessages.ReleaseAck
    d.param := 0.U
    d.size := lgSize
    d.source := toSource
    d.sink := 0.U
    d.denied := denied
    d.user := DontCare
    d.echo := DontCare
    d.data := DontCare
    d.corrupt := false.B
    d
  }

  // Accesses
  def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
    require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
    val legal = client.supportsGet(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode := TLMessages.Get
    b.param := 0.U
    b.size := lgSize
    b.source := toSource
    b.address := fromAddress
    b.mask := mask(fromAddress, lgSize)
    b.data := DontCare
    b.corrupt := false.B
    (legal, b)
  }

  def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
    Put(fromAddress, toSource, lgSize, data, false.B)

  def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
    require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
    val legal = client.supportsPutFull(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode := TLMessages.PutFullData
    b.param := 0.U
    b.size := lgSize
    b.source := toSource
    b.address := fromAddress
    b.mask := mask(fromAddress, lgSize)
    b.data := data
    b.corrupt := corrupt
    (legal, b)
  }

  def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
    Put(fromAddress, toSource, lgSize, data, mask, false.B)

  def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
    require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
    val legal = client.supportsPutPartial(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode := TLMessages.PutPartialData
    b.param := 0.U
    b.size := lgSize
    b.source := toSource
    b.address := fromAddress
    b.mask := mask
    b.data := data
    b.corrupt := corrupt
    (legal, b)
  }

  def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
    require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
    val legal = client.supportsArithmetic(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode := TLMessages.ArithmeticData
    b.param := atomic
    b.size := lgSize
    b.source := toSource
    b.address := fromAddress
    b.mask := mask(fromAddress, lgSize)
    b.data := data
    b.corrupt := corrupt
    (legal, b)
  }

  def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
    require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
    val legal = client.supportsLogical(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode := TLMessages.LogicalData
    b.param := atomic
    b.size := lgSize
    b.source := toSource
    b.address := fromAddress
    b.mask := mask(fromAddress, lgSize)
    b.data := data
    b.corrupt := corrupt
    (legal, b)
  }

  def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
    require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
    val legal = client.supportsHint(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode := TLMessages.Hint
    b.param := param
    b.size := lgSize
    b.source := toSource
    b.address := fromAddress
    b.mask := mask(fromAddress, lgSize)
    b.data := DontCare
    b.corrupt := false.B
    (legal, b)
  }

  def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
  def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
  def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
  def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
    val d = Wire(new TLBundleD(bundle))
    d.opcode := TLMessages.AccessAck
    d.param := 0.U
    d.size := lgSize
    d.source := toSource
    d.sink := 0.U
    d.denied := denied
    d.user := DontCare
    d.echo := DontCare
    d.data := DontCare
    d.corrupt := false.B
    d
  }

  def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
  def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
  def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
  def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
    val d = Wire(new TLBundleD(bundle))
    d.opcode := TLMessages.AccessAckData
    d.param := 0.U
    d.size := lgSize
    d.source := toSource
    d.sink := 0.U
    d.denied := denied
    d.user := DontCare
    d.echo := DontCare
    d.data := data
    d.corrupt := corrupt
    d
  }

  def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
  def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
  def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
  def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
    val d = Wire(new TLBundleD(bundle))
    d.opcode := TLMessages.HintAck
    d.param := 0.U
    d.size := lgSize
    d.source := toSource
    d.sink := 0.U
    d.denied := denied
    d.user := DontCare
    d.echo := DontCare
    d.data := DontCare
    d.corrupt := false.B
    d
  }
}
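For context, a minimal illustrative sketch (not one of the files above) of how the TLEdgeOut helpers are typically driven from a LazyModuleImp; the client node `node`, the enable signal `wantRequest`, and the address/size literals are assumptions, not part of this design:

  // Hypothetical usage sketch only; `node` (a TLClientNode) and `wantRequest` are assumed to exist elsewhere.
  val (out, edge) = node.out(0)                // TLBundle plus the TLEdgeOut for this client port
  val (legal, get) = edge.Get(fromSource = 0.U, toAddress = 0x1000.U, lgSize = 3.U) // 8-byte read
  out.a.valid := wantRequest && legal          // only issue the Get if some manager can accept it
  out.a.bits := get
  out.d.ready := true.B
  val d_done = edge.done(out.d)                // asserted on the final beat of the D response
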
module TLMonitor_29( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [6:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [16:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [6:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input [63:0] io_in_d_bits_data // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7] wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7] wire [6:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7] wire [16:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7] wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7] wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7] wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7] wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7] wire [6:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7] wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7] wire io_in_d_bits_sink = 1'h0; // @[Monitor.scala:36:7] wire io_in_d_bits_denied = 1'h0; // @[Monitor.scala:36:7] wire io_in_d_bits_corrupt = 1'h0; // @[Monitor.scala:36:7] wire sink_ok = 1'h0; // @[Monitor.scala:309:31] wire d_release_ack = 1'h0; // @[Monitor.scala:673:46] wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35] wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36] wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25] wire c_first_done = 1'h0; // @[Edges.scala:233:22] wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_valid = 
1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47] wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95] wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71] wire d_release_ack_1 = 1'h0; // @[Monitor.scala:783:46] wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_valid = 
1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44] wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36] wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51] wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40] wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55] wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88] wire [2:0] a_first_beats1 = 3'h0; // @[Edges.scala:221:14] wire [2:0] a_first_count = 3'h0; // @[Edges.scala:234:25] wire [2:0] a_first_beats1_1 = 3'h0; // @[Edges.scala:221:14] wire [2:0] a_first_count_1 = 3'h0; // @[Edges.scala:234:25] wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59] wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14] wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27] wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25] wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21] wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] 
_c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // 
@[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61] wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_27 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_29 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_46 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_48 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_52 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_54 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_58 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_60 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_64 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_66 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_70 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_72 = 1'h1; // @[Parameters.scala:57:20] wire _a_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire a_first_last = 1'h1; // @[Edges.scala:232:33] wire d_first_beats1_opdata = 1'h1; // @[Edges.scala:106:36] wire _a_first_last_T_3 = 1'h1; // @[Edges.scala:232:43] wire a_first_last_1 = 1'h1; // @[Edges.scala:232:33] wire d_first_beats1_opdata_1 = 1'h1; // @[Edges.scala:106:36] wire c_first = 1'h1; // @[Edges.scala:231:25] wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire c_first_last = 1'h1; // @[Edges.scala:232:33] wire d_first_beats1_opdata_2 = 1'h1; // @[Edges.scala:106:36] wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28] wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28] wire [1:0] io_in_d_bits_param = 2'h0; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_opcode = 3'h1; // @[Monitor.scala:36:7] wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] 
responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42] wire [259:0] _inflight_opcodes_T_4 = 260'hFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF; // @[Monitor.scala:815:62] wire [259:0] _inflight_sizes_T_4 = 260'hFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF; // @[Monitor.scala:816:58] wire [64:0] _inflight_T_4 = 65'h1FFFFFFFFFFFFFFFF; // @[Monitor.scala:814:46] wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [16:0] _c_first_WIRE_bits_address = 17'h0; // @[Bundles.scala:265:74] wire [16:0] _c_first_WIRE_1_bits_address = 17'h0; // @[Bundles.scala:265:61] wire [16:0] _c_first_WIRE_2_bits_address = 17'h0; // @[Bundles.scala:265:74] wire [16:0] _c_first_WIRE_3_bits_address = 17'h0; // @[Bundles.scala:265:61] wire [16:0] _c_set_wo_ready_WIRE_bits_address = 17'h0; // @[Bundles.scala:265:74] wire [16:0] _c_set_wo_ready_WIRE_1_bits_address = 17'h0; // @[Bundles.scala:265:61] wire [16:0] _c_set_WIRE_bits_address = 17'h0; // @[Bundles.scala:265:74] wire [16:0] _c_set_WIRE_1_bits_address = 17'h0; // @[Bundles.scala:265:61] wire [16:0] _c_opcodes_set_interm_WIRE_bits_address = 17'h0; // @[Bundles.scala:265:74] wire [16:0] _c_opcodes_set_interm_WIRE_1_bits_address = 17'h0; // @[Bundles.scala:265:61] wire [16:0] _c_sizes_set_interm_WIRE_bits_address = 17'h0; // @[Bundles.scala:265:74] wire [16:0] _c_sizes_set_interm_WIRE_1_bits_address = 17'h0; // @[Bundles.scala:265:61] wire [16:0] _c_opcodes_set_WIRE_bits_address = 17'h0; 
// @[Bundles.scala:265:74] wire [16:0] _c_opcodes_set_WIRE_1_bits_address = 17'h0; // @[Bundles.scala:265:61] wire [16:0] _c_sizes_set_WIRE_bits_address = 17'h0; // @[Bundles.scala:265:74] wire [16:0] _c_sizes_set_WIRE_1_bits_address = 17'h0; // @[Bundles.scala:265:61] wire [16:0] _c_probe_ack_WIRE_bits_address = 17'h0; // @[Bundles.scala:265:74] wire [16:0] _c_probe_ack_WIRE_1_bits_address = 17'h0; // @[Bundles.scala:265:61] wire [16:0] _c_probe_ack_WIRE_2_bits_address = 17'h0; // @[Bundles.scala:265:74] wire [16:0] _c_probe_ack_WIRE_3_bits_address = 17'h0; // @[Bundles.scala:265:61] wire [16:0] _same_cycle_resp_WIRE_bits_address = 17'h0; // @[Bundles.scala:265:74] wire [16:0] _same_cycle_resp_WIRE_1_bits_address = 17'h0; // @[Bundles.scala:265:61] wire [16:0] _same_cycle_resp_WIRE_2_bits_address = 17'h0; // @[Bundles.scala:265:74] wire [16:0] _same_cycle_resp_WIRE_3_bits_address = 17'h0; // @[Bundles.scala:265:61] wire [16:0] _same_cycle_resp_WIRE_4_bits_address = 17'h0; // @[Bundles.scala:265:74] wire [16:0] _same_cycle_resp_WIRE_5_bits_address = 17'h0; // @[Bundles.scala:265:61] wire [6:0] _c_first_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_first_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_first_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_first_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_set_wo_ready_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_set_wo_ready_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_opcodes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_opcodes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_sizes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_sizes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_opcodes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_opcodes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_sizes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_sizes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_probe_ack_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_probe_ack_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_probe_ack_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_probe_ack_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _same_cycle_resp_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _same_cycle_resp_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _same_cycle_resp_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _same_cycle_resp_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _same_cycle_resp_WIRE_4_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _same_cycle_resp_WIRE_5_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [259:0] c_opcodes_set = 260'h0; // @[Monitor.scala:740:34] wire [259:0] c_sizes_set = 260'h0; // @[Monitor.scala:741:34] wire [259:0] d_opcodes_clr_1 = 260'h0; // @[Monitor.scala:776:34] wire [259:0] d_sizes_clr_1 = 260'h0; // @[Monitor.scala:777:34] wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] 
_d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [64:0] c_set = 65'h0; // @[Monitor.scala:738:34] wire [64:0] c_set_wo_ready = 65'h0; // @[Monitor.scala:739:34] wire [64:0] d_clr_1 = 65'h0; // @[Monitor.scala:774:34] wire [64:0] d_clr_wo_ready_1 = 65'h0; // @[Monitor.scala:775:34] wire [1026:0] _c_opcodes_set_T_1 = 1027'h0; // @[Monitor.scala:767:54] wire [1026:0] _c_sizes_set_T_1 = 1027'h0; // @[Monitor.scala:768:52] wire [9:0] _c_opcodes_set_T = 10'h0; // @[Monitor.scala:767:79] wire [9:0] _c_sizes_set_T = 10'h0; // @[Monitor.scala:768:77] wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61] wire [3:0] _c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59] wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40] wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40] wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53] wire [3:0] _c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51] wire [127:0] _c_set_wo_ready_T = 128'h1; // @[OneHot.scala:58:35] wire [127:0] _c_set_T = 128'h1; // @[OneHot.scala:58:35] wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46] wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76] wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71] wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42] wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42] wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123] wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117] wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48] wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48] wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123] wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119] wire [3:0] _d_opcodes_clr_T_6 = 4'h4; 
// @[Monitor.scala:790:48] wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48] wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34] wire [6:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_36 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] 
wire [6:0] _uncommonBits_T_37 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_38 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_39 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_40 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_41 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_42 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_43 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_44 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_45 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_46 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_47 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_48 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_49 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_50 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_51 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_52 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_53 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_54 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_5 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_8 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_9 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire _source_ok_T = io_in_a_bits_source_0 == 7'h10; // @[Monitor.scala:36:7] wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] _source_ok_T_1 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_7 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_13 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_19 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire _source_ok_T_2 = _source_ok_T_1 == 5'h0; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_8 = _source_ok_T_7 == 5'h1; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_14 = _source_ok_T_13 == 5'h2; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_3 = _source_ok_T_18; // 
@[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_20 = _source_ok_T_19 == 5'h3; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31] wire [2:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[2:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] _source_ok_T_25 = io_in_a_bits_source_0[6:3]; // @[Monitor.scala:36:7] wire _source_ok_T_26 = _source_ok_T_25 == 4'h4; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_28 = _source_ok_T_26; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_30 = _source_ok_T_28; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_5 = _source_ok_T_30; // @[Parameters.scala:1138:31] wire _source_ok_T_31 = io_in_a_bits_source_0 == 7'h28; // @[Monitor.scala:36:7] wire _source_ok_WIRE_6 = _source_ok_T_31; // @[Parameters.scala:1138:31] wire _source_ok_T_32 = io_in_a_bits_source_0 == 7'h29; // @[Monitor.scala:36:7] wire _source_ok_WIRE_7 = _source_ok_T_32; // @[Parameters.scala:1138:31] wire _source_ok_T_33 = io_in_a_bits_source_0 == 7'h2A; // @[Monitor.scala:36:7] wire _source_ok_WIRE_8 = _source_ok_T_33; // @[Parameters.scala:1138:31] wire _source_ok_T_34 = io_in_a_bits_source_0 == 7'h40; // @[Monitor.scala:36:7] wire _source_ok_WIRE_9 = _source_ok_T_34; // @[Parameters.scala:1138:31] wire _source_ok_T_35 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_36 = _source_ok_T_35 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_37 = _source_ok_T_36 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_38 = _source_ok_T_37 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_39 = _source_ok_T_38 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_40 = _source_ok_T_39 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_41 = _source_ok_T_40 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_42 = _source_ok_T_41 | _source_ok_WIRE_8; // @[Parameters.scala:1138:31, :1139:46] wire source_ok = _source_ok_T_42 | _source_ok_WIRE_9; // @[Parameters.scala:1138:31, :1139:46] wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71] wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71] assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71] wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71] assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71] wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71] assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71] wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}] wire [16:0] _is_aligned_T = {11'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46] wire is_aligned = _is_aligned_T == 17'h0; // @[Edges.scala:21:{16,24}] wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49] wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}] wire [2:0] 
mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27] wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21] wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26] wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26] wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}] wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}] wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26] wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26] wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}] wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26] wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26] wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20] wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}] wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38] wire 
mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}] wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}] wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}] wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}] wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10] wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10] wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10] wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_4 = _uncommonBits_T_4[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_5 = _uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_6 = _uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_7 = _uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_9 = _uncommonBits_T_9[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_10 = _uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_11 = _uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_12 = _uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_13 = _uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_14 = _uncommonBits_T_14[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_16 = _uncommonBits_T_16[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_17 = _uncommonBits_T_17[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_18 = _uncommonBits_T_18[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_19 = _uncommonBits_T_19[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_24 = _uncommonBits_T_24[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_25 = _uncommonBits_T_25[1:0]; // 
@[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_28 = _uncommonBits_T_28[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_29 = _uncommonBits_T_29[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_30 = _uncommonBits_T_30[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_31 = _uncommonBits_T_31[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_34 = _uncommonBits_T_34[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_36 = _uncommonBits_T_36[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_37 = _uncommonBits_T_37[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_38 = _uncommonBits_T_38[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_39 = _uncommonBits_T_39[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_40 = _uncommonBits_T_40[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_41 = _uncommonBits_T_41[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_42 = _uncommonBits_T_42[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_43 = _uncommonBits_T_43[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_44 = _uncommonBits_T_44[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_45 = _uncommonBits_T_45[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_46 = _uncommonBits_T_46[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_47 = _uncommonBits_T_47[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_48 = _uncommonBits_T_48[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_49 = _uncommonBits_T_49[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_50 = _uncommonBits_T_50[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_51 = _uncommonBits_T_51[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_52 = _uncommonBits_T_52[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_53 = _uncommonBits_T_53[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_54 = _uncommonBits_T_54[2:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_43 = io_in_d_bits_source_0 == 7'h10; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_0 = _source_ok_T_43; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] _source_ok_T_44 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_50 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_56 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_62 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire _source_ok_T_45 = _source_ok_T_44 == 5'h0; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_47 = _source_ok_T_45; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_49 = _source_ok_T_47; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_1 = _source_ok_T_49; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[1:0]; // 
@[Parameters.scala:52:{29,56}] wire _source_ok_T_51 = _source_ok_T_50 == 5'h1; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_53 = _source_ok_T_51; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_55 = _source_ok_T_53; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_2 = _source_ok_T_55; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_57 = _source_ok_T_56 == 5'h2; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_59 = _source_ok_T_57; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_61 = _source_ok_T_59; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_3 = _source_ok_T_61; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_8 = _source_ok_uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_63 = _source_ok_T_62 == 5'h3; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_65 = _source_ok_T_63; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_67 = _source_ok_T_65; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_4 = _source_ok_T_67; // @[Parameters.scala:1138:31] wire [2:0] source_ok_uncommonBits_9 = _source_ok_uncommonBits_T_9[2:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] _source_ok_T_68 = io_in_d_bits_source_0[6:3]; // @[Monitor.scala:36:7] wire _source_ok_T_69 = _source_ok_T_68 == 4'h4; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_71 = _source_ok_T_69; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_73 = _source_ok_T_71; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_5 = _source_ok_T_73; // @[Parameters.scala:1138:31] wire _source_ok_T_74 = io_in_d_bits_source_0 == 7'h28; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_6 = _source_ok_T_74; // @[Parameters.scala:1138:31] wire _source_ok_T_75 = io_in_d_bits_source_0 == 7'h29; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_7 = _source_ok_T_75; // @[Parameters.scala:1138:31] wire _source_ok_T_76 = io_in_d_bits_source_0 == 7'h2A; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_8 = _source_ok_T_76; // @[Parameters.scala:1138:31] wire _source_ok_T_77 = io_in_d_bits_source_0 == 7'h40; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_9 = _source_ok_T_77; // @[Parameters.scala:1138:31] wire _source_ok_T_78 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_79 = _source_ok_T_78 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_80 = _source_ok_T_79 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_81 = _source_ok_T_80 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_82 = _source_ok_T_81 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_83 = _source_ok_T_82 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_84 = _source_ok_T_83 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_85 = _source_ok_T_84 | _source_ok_WIRE_1_8; // @[Parameters.scala:1138:31, :1139:46] wire source_ok_1 = _source_ok_T_85 | _source_ok_WIRE_1_9; // @[Parameters.scala:1138:31, :1139:46] wire _T_1174 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35] wire _a_first_T; // @[Decoupled.scala:51:35] assign _a_first_T = _T_1174; // @[Decoupled.scala:51:35] wire _a_first_T_1; // @[Decoupled.scala:51:35] assign _a_first_T_1 = _T_1174; // @[Decoupled.scala:51:35] wire a_first_done = _a_first_T; // 
@[Decoupled.scala:51:35] wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46] wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}] reg [2:0] a_first_counter; // @[Edges.scala:229:27] wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28] wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25] wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27] wire [2:0] _a_first_counter_T = a_first ? 3'h0 : a_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [2:0] size; // @[Monitor.scala:389:22] reg [6:0] source; // @[Monitor.scala:390:22] reg [16:0] address; // @[Monitor.scala:391:22] wire _T_1242 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35] wire _d_first_T; // @[Decoupled.scala:51:35] assign _d_first_T = _T_1242; // @[Decoupled.scala:51:35] wire _d_first_T_1; // @[Decoupled.scala:51:35] assign _d_first_T_1 = _T_1242; // @[Decoupled.scala:51:35] wire _d_first_T_2; // @[Decoupled.scala:51:35] assign _d_first_T_2 = _T_1242; // @[Decoupled.scala:51:35] wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71] assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71] assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71] assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71] wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46] wire [2:0] d_first_beats1 = d_first_beats1_decode; // @[Edges.scala:220:59, :221:14] reg [2:0] d_first_counter; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28] wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T = d_first ? 
d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] size_1; // @[Monitor.scala:540:22] reg [6:0] source_1; // @[Monitor.scala:541:22] reg [64:0] inflight; // @[Monitor.scala:614:27] reg [259:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [259:0] inflight_sizes; // @[Monitor.scala:618:33] wire a_first_done_1 = _a_first_T_1; // @[Decoupled.scala:51:35] wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46] wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}] reg [2:0] a_first_counter_1; // @[Edges.scala:229:27] wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28] wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25] wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [2:0] _a_first_counter_T_1 = a_first_1 ? 3'h0 : a_first_counter1_1; // @[Edges.scala:230:28, :231:25, :236:21] wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46] wire [2:0] d_first_beats1_1 = d_first_beats1_decode_1; // @[Edges.scala:220:59, :221:14] reg [2:0] d_first_counter_1; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28] wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T_1 = d_first_1 ? 
d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [64:0] a_set; // @[Monitor.scala:626:34] wire [64:0] a_set_wo_ready; // @[Monitor.scala:627:34] wire [259:0] a_opcodes_set; // @[Monitor.scala:630:33] wire [259:0] a_sizes_set; // @[Monitor.scala:632:31] wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35] wire [9:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69] wire [9:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69] assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69] wire [9:0] _a_size_lookup_T; // @[Monitor.scala:641:65] assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65] wire [9:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101] assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101] wire [9:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99] assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99] wire [9:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69] assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69] wire [9:0] _c_size_lookup_T; // @[Monitor.scala:750:67] assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67] wire [9:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101] assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101] wire [9:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99] assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99] wire [259:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}] wire [259:0] _a_opcode_lookup_T_6 = {256'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}] wire [259:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:637:{97,152}] assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}] wire [3:0] a_size_lookup; // @[Monitor.scala:639:33] wire [259:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}] wire [259:0] _a_size_lookup_T_6 = {256'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}] wire [259:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[259:1]}; // @[Monitor.scala:641:{91,144}] assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}] wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40] wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38] wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44] wire [127:0] _GEN_2 = 128'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35] wire [127:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35] assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35] wire [127:0] _a_set_T; // @[OneHot.scala:58:35] assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35] assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35] wire _T_1107 = _T_1174 & a_first_1; // @[Decoupled.scala:51:35] assign a_set = _T_1107 ? _a_set_T[64:0] : 65'h0; // @[OneHot.scala:58:35] wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53] wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}] assign a_opcodes_set_interm = _T_1107 ? 
_a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}] wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51] wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}] assign a_sizes_set_interm = _T_1107 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}] wire [9:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79] wire [9:0] _a_opcodes_set_T; // @[Monitor.scala:659:79] assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79] wire [9:0] _a_sizes_set_T; // @[Monitor.scala:660:77] assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77] wire [1026:0] _a_opcodes_set_T_1 = {1023'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}] assign a_opcodes_set = _T_1107 ? _a_opcodes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}] wire [1026:0] _a_sizes_set_T_1 = {1023'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}] assign a_sizes_set = _T_1107 ? _a_sizes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}] wire [64:0] d_clr; // @[Monitor.scala:664:34] wire [64:0] d_clr_wo_ready; // @[Monitor.scala:665:34] wire [259:0] d_opcodes_clr; // @[Monitor.scala:668:33] wire [259:0] d_sizes_clr; // @[Monitor.scala:670:31] wire _T_1153 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26] wire [127:0] _GEN_4 = 128'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35] wire [127:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T = _GEN_4; // @[OneHot.scala:58:35] wire [127:0] _d_clr_T; // @[OneHot.scala:58:35] assign _d_clr_T = _GEN_4; // @[OneHot.scala:58:35] wire [127:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T_1 = _GEN_4; // @[OneHot.scala:58:35] wire [127:0] _d_clr_T_1; // @[OneHot.scala:58:35] assign _d_clr_T_1 = _GEN_4; // @[OneHot.scala:58:35] assign d_clr_wo_ready = _T_1153 ? _d_clr_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35] wire _T_1120 = _T_1242 & d_first_1; // @[Decoupled.scala:51:35] assign d_clr = _T_1120 ? _d_clr_T[64:0] : 65'h0; // @[OneHot.scala:58:35] wire [1038:0] _d_opcodes_clr_T_5 = 1039'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}] assign d_opcodes_clr = _T_1120 ? _d_opcodes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:668:33, :678:{25,89}, :680:{21,76}] wire [1038:0] _d_sizes_clr_T_5 = 1039'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}] assign d_sizes_clr = _T_1120 ? 
_d_sizes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:670:31, :678:{25,89}, :681:{21,74}] wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}] wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113] wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}] wire [64:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27] wire [64:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38] wire [64:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}] wire [259:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43] wire [259:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62] wire [259:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}] wire [259:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39] wire [259:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56] wire [259:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}] reg [31:0] watchdog; // @[Monitor.scala:709:27] wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26] wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26] reg [64:0] inflight_1; // @[Monitor.scala:726:35] wire [64:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35] reg [259:0] inflight_opcodes_1; // @[Monitor.scala:727:35] wire [259:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43] reg [259:0] inflight_sizes_1; // @[Monitor.scala:728:35] wire [259:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41] wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46] wire [2:0] d_first_beats1_2 = d_first_beats1_decode_2; // @[Edges.scala:220:59, :221:14] reg [2:0] d_first_counter_2; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28] wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}] wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T_2 = d_first_2 ? 
d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35] wire [3:0] c_size_lookup; // @[Monitor.scala:748:35] wire [259:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}] wire [259:0] _c_opcode_lookup_T_6 = {256'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}] wire [259:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:749:{97,152}] assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}] wire [259:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}] wire [259:0] _c_size_lookup_T_6 = {256'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}] wire [259:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[259:1]}; // @[Monitor.scala:750:{93,146}] assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}] wire [1038:0] _d_opcodes_clr_T_11 = 1039'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}] wire [1038:0] _d_sizes_clr_T_11 = 1039'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}] wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 7'h0; // @[Monitor.scala:36:7, :795:113] wire [64:0] _inflight_T_5 = _inflight_T_3; // @[Monitor.scala:814:{35,44}] wire [259:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3; // @[Monitor.scala:815:{43,60}] wire [259:0] _inflight_sizes_T_5 = _inflight_sizes_T_3; // @[Monitor.scala:816:{41,56}] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
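The generated monitor logic above repeatedly applies the pattern 13'h3F << io_in_a_bits_size_0, an inversion, and a bit slice; this is how the TileLink edge helpers turn the logarithmic size field into a beat count and an alignment mask. A minimal Chisel sketch of that decode, simplified for an 8-byte data bus and a 17-bit address rather than taken verbatim from rocket-chip, is:

import chisel3._

class MonitorDecodeSketch extends Module {
  val io = IO(new Bundle {
    val size       = Input(UInt(3.W))   // log2 of the transfer size in bytes
    val address    = Input(UInt(17.W))  // matches the 17-bit address in the module above
    val beats1     = Output(UInt(3.W))  // number of beats minus one on an 8-byte bus
    val is_aligned = Output(Bool())
  })
  // ~(0x3F << size) recovers (1 << size) - 1 in the low six bits, the same
  // shift / invert / slice sequence emitted throughout the Verilog above.
  val mask = (~(63.U(13.W) << io.size))(5, 0)
  io.beats1     := mask(5, 3)                      // e.g. size = 6 (64 B) gives 7, i.e. 8 beats
  io.is_aligned := (io.address & mask) === 0.U     // the low address bits must be zero
}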
Generate the Verilog code corresponding to the following Chisel files. File util.scala: //****************************************************************************** // Copyright (c) 2015 - 2019, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // Utility Functions //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ package boom.v3.util import chisel3._ import chisel3.util._ import freechips.rocketchip.rocket.Instructions._ import freechips.rocketchip.rocket._ import freechips.rocketchip.util.{Str} import org.chipsalliance.cde.config.{Parameters} import freechips.rocketchip.tile.{TileKey} import boom.v3.common.{MicroOp} import boom.v3.exu.{BrUpdateInfo} /** * Object to XOR fold a input register of fullLength into a compressedLength. */ object Fold { def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = { val clen = compressedLength val hlen = fullLength if (hlen <= clen) { input } else { var res = 0.U(clen.W) var remaining = input.asUInt for (i <- 0 to hlen-1 by clen) { val len = if (i + clen > hlen ) (hlen - i) else clen require(len > 0) res = res(clen-1,0) ^ remaining(len-1,0) remaining = remaining >> len.U } res } } } /** * Object to check if MicroOp was killed due to a branch mispredict. * Uses "Fast" branch masks */ object IsKilledByBranch { def apply(brupdate: BrUpdateInfo, uop: MicroOp): Bool = { return maskMatch(brupdate.b1.mispredict_mask, uop.br_mask) } def apply(brupdate: BrUpdateInfo, uop_mask: UInt): Bool = { return maskMatch(brupdate.b1.mispredict_mask, uop_mask) } } /** * Object to return new MicroOp with a new BR mask given a MicroOp mask * and old BR mask. */ object GetNewUopAndBrMask { def apply(uop: MicroOp, brupdate: BrUpdateInfo) (implicit p: Parameters): MicroOp = { val newuop = WireInit(uop) newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask newuop } } /** * Object to return a BR mask given a MicroOp mask and old BR mask. 
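* For example (values assumed for illustration): a uop with br_mask = "b0110".U under a brupdate whose b1.resolve_mask is "b0100".U is left with "b0010".U; the resolved branch's bit is cleared while the still-speculative branch is kept.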
*/ object GetNewBrMask { def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt = { return uop.br_mask & ~brupdate.b1.resolve_mask } def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt = { return br_mask & ~brupdate.b1.resolve_mask } } object UpdateBrMask { def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = { val out = WireInit(uop) out.br_mask := GetNewBrMask(brupdate, uop) out } def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = { val out = WireInit(bundle) out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask) out } def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: Valid[T]): Valid[T] = { val out = WireInit(bundle) out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask) out.valid := bundle.valid && !IsKilledByBranch(brupdate, bundle.bits.uop.br_mask) out } } /** * Object to check if at least 1 bit matches in two masks */ object maskMatch { def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U } /** * Object to clear one bit in a mask given an index */ object clearMaskBit { def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0) } /** * Object to shift a register over by one bit and concat a new one */ object PerformShiftRegister { def apply(reg_val: UInt, new_bit: Bool): UInt = { reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt reg_val } } /** * Object to shift a register over by one bit, wrapping the top bit around to the bottom * (XOR'ed with a new-bit), and evicting a bit at index HLEN. * This is used to simulate a longer HLEN-width shift register that is folded * down to a compressed CLEN. */ object PerformCircularShiftRegister { def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = { val carry = csr(clen-1) val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U) newval } } /** * Object to increment an input value, wrapping it if * necessary. */ object WrapAdd { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, amt: UInt, n: Int): UInt = { if (isPow2(n)) { (value + amt)(log2Ceil(n)-1,0) } else { val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt) Mux(sum >= n.U, sum - n.U, sum) } } } /** * Object to decrement an input value, wrapping it if * necessary. */ object WrapSub { // "n" is the number of increments, so we wrap to n-1. def apply(value: UInt, amt: Int, n: Int): UInt = { if (isPow2(n)) { (value - amt.U)(log2Ceil(n)-1,0) } else { val v = Cat(0.U(1.W), value) val b = Cat(0.U(1.W), amt.U) Mux(value >= amt.U, value - amt.U, n.U - amt.U + value) } } } /** * Object to increment an input value, wrapping it if * necessary. */ object WrapInc { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, n: Int): UInt = { if (isPow2(n)) { (value + 1.U)(log2Ceil(n)-1,0) } else { val wrap = (value === (n-1).U) Mux(wrap, 0.U, value + 1.U) } } } /** * Object to decrement an input value, wrapping it if * necessary. */ object WrapDec { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, n: Int): UInt = { if (isPow2(n)) { (value - 1.U)(log2Ceil(n)-1,0) } else { val wrap = (value === 0.U) Mux(wrap, (n-1).U, value - 1.U) } } } /** * Object to mask off lower bits of a PC to align to a "b" * Byte boundary. */ object AlignPCToBoundary { def apply(pc: UInt, b: Int): UInt = { // Invert for scenario where pc longer than b // (which would clear all bits above size(b)). 
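// E.g. with b = 64: (b-1).U is only a 6-bit literal, so inverting pc at its full
// width (rather than inverting the narrow literal) zeroes just pc(5,0) and leaves
// the upper PC bits intact.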
~(~pc | (b-1).U) } } /** * Object to rotate a signal left by one */ object RotateL1 { def apply(signal: UInt): UInt = { val w = signal.getWidth val out = Cat(signal(w-2,0), signal(w-1)) return out } } /** * Object to sext a value to a particular length. */ object Sext { def apply(x: UInt, length: Int): UInt = { if (x.getWidth == length) return x else return Cat(Fill(length-x.getWidth, x(x.getWidth-1)), x) } } /** * Object to translate from BOOM's special "packed immediate" to a 32b signed immediate * Asking for U-type gives it shifted up 12 bits. */ object ImmGen { import boom.v3.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U} def apply(ip: UInt, isel: UInt): SInt = { val sign = ip(LONGEST_IMM_SZ-1).asSInt val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign) val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign) val i11 = Mux(isel === IS_U, 0.S, Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign)) val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt) val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt) val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S) return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0).asSInt } } /** * Object to get the FP rounding mode out of a packed immediate. */ object ImmGenRm { def apply(ip: UInt): UInt = { return ip(2,0) } } /** * Object to get the FP function fype from a packed immediate. * Note: only works if !(IS_B or IS_S) */ object ImmGenTyp { def apply(ip: UInt): UInt = { return ip(9,8) } } /** * Object to see if an instruction is a JALR. */ object DebugIsJALR { def apply(inst: UInt): Bool = { // TODO Chisel not sure why this won't compile // val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)), // Array( // JALR -> Bool(true))) inst(6,0) === "b1100111".U } } /** * Object to take an instruction and output its branch or jal target. Only used * for a debug assert (no where else would we jump straight from instruction * bits to a target). */ object DebugGetBJImm { def apply(inst: UInt): UInt = { // TODO Chisel not sure why this won't compile //val csignals = //rocket.DecodeLogic(inst, // List(Bool(false), Bool(false)), // Array( // BEQ -> List(Bool(true ), Bool(false)), // BNE -> List(Bool(true ), Bool(false)), // BGE -> List(Bool(true ), Bool(false)), // BGEU -> List(Bool(true ), Bool(false)), // BLT -> List(Bool(true ), Bool(false)), // BLTU -> List(Bool(true ), Bool(false)) // )) //val is_br :: nothing :: Nil = csignals val is_br = (inst(6,0) === "b1100011".U) val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W)) val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W)) Mux(is_br, br_targ, jal_targ) } } /** * Object to return the lowest bit position after the head. */ object AgePriorityEncoder { def apply(in: Seq[Bool], head: UInt): UInt = { val n = in.size val width = log2Ceil(in.size) val n_padded = 1 << width val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in val idx = PriorityEncoder(temp_vec) idx(width-1, 0) //discard msb } } /** * Object to determine whether queue * index i0 is older than index i1. */ object IsOlder { def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head)) } /** * Set all bits at or below the highest order '1'. */ object MaskLower { def apply(in: UInt) = { val n = in.getWidth (0 until n).map(i => in >> i.U).reduce(_|_) } } /** * Set all bits at or above the lowest order '1'. 
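* For example, an input of 0b00010100 becomes 0b11111100.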
*/ object MaskUpper { def apply(in: UInt) = { val n = in.getWidth (0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_) } } /** * Transpose a matrix of Chisel Vecs. */ object Transpose { def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = { val n = in(0).size VecInit((0 until n).map(i => VecInit(in.map(row => row(i))))) } } /** * N-wide one-hot priority encoder. */ object SelectFirstN { def apply(in: UInt, n: Int) = { val sels = Wire(Vec(n, UInt(in.getWidth.W))) var mask = in for (i <- 0 until n) { sels(i) := PriorityEncoderOH(mask) mask = mask & ~sels(i) } sels } } /** * Connect the first k of n valid input interfaces to k output interfaces. */ class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module { require(n >= k) val io = IO(new Bundle { val in = Vec(n, Flipped(DecoupledIO(gen))) val out = Vec(k, DecoupledIO(gen)) }) if (n == k) { io.out <> io.in } else { val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c)) val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col => (col zip io.in.map(_.valid)) map {case (c,v) => c && v}) val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_)) val out_valids = sels map (col => col.reduce(_||_)) val out_data = sels map (s => Mux1H(s, io.in.map(_.bits))) in_readys zip io.in foreach {case (r,i) => i.ready := r} out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d} } } /** * Create a queue that can be killed with a branch kill signal. * Assumption: enq.valid only high if not killed by branch (so don't check IsKilled on io.enq). */ class BranchKillableQueue[T <: boom.v3.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v3.common.MicroOp => Bool = u => true.B, flow: Boolean = true) (implicit p: org.chipsalliance.cde.config.Parameters) extends boom.v3.common.BoomModule()(p) with boom.v3.common.HasBoomCoreParameters { val io = IO(new Bundle { val enq = Flipped(Decoupled(gen)) val deq = Decoupled(gen) val brupdate = Input(new BrUpdateInfo()) val flush = Input(Bool()) val empty = Output(Bool()) val count = Output(UInt(log2Ceil(entries).W)) }) val ram = Mem(entries, gen) val valids = RegInit(VecInit(Seq.fill(entries) {false.B})) val uops = Reg(Vec(entries, new MicroOp)) val enq_ptr = Counter(entries) val deq_ptr = Counter(entries) val maybe_full = RegInit(false.B) val ptr_match = enq_ptr.value === deq_ptr.value io.empty := ptr_match && !maybe_full val full = ptr_match && maybe_full val do_enq = WireInit(io.enq.fire) val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty) for (i <- 0 until entries) { val mask = uops(i).br_mask val uop = uops(i) valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, mask) && !(io.flush && flush_fn(uop)) when (valids(i)) { uops(i).br_mask := GetNewBrMask(io.brupdate, mask) } } when (do_enq) { ram(enq_ptr.value) := io.enq.bits valids(enq_ptr.value) := true.B //!IsKilledByBranch(io.brupdate, io.enq.bits.uop) uops(enq_ptr.value) := io.enq.bits.uop uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop) enq_ptr.inc() } when (do_deq) { valids(deq_ptr.value) := false.B deq_ptr.inc() } when (do_enq =/= do_deq) { maybe_full := do_enq } io.enq.ready := !full val out = Wire(gen) out := ram(deq_ptr.value) out.uop := uops(deq_ptr.value) io.deq.valid := !io.empty && valids(deq_ptr.value) && !IsKilledByBranch(io.brupdate, out.uop) && !(io.flush && flush_fn(out.uop)) io.deq.bits := out io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, 
out.uop) // For flow queue behavior. if (flow) { when (io.empty) { io.deq.valid := io.enq.valid //&& !IsKilledByBranch(io.brupdate, io.enq.bits.uop) io.deq.bits := io.enq.bits io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop) do_deq := false.B when (io.deq.ready) { do_enq := false.B } } } private val ptr_diff = enq_ptr.value - deq_ptr.value if (isPow2(entries)) { io.count := Cat(maybe_full && ptr_match, ptr_diff) } else { io.count := Mux(ptr_match, Mux(maybe_full, entries.asUInt, 0.U), Mux(deq_ptr.value > enq_ptr.value, entries.asUInt + ptr_diff, ptr_diff)) } } // ------------------------------------------ // Printf helper functions // ------------------------------------------ object BoolToChar { /** * Take in a Chisel Bool and convert it into a Str * based on the Chars given * * @param c_bool Chisel Bool * @param trueChar Scala Char if bool is true * @param falseChar Scala Char if bool is false * @return UInt ASCII Char for "trueChar" or "falseChar" */ def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = { Mux(c_bool, Str(trueChar), Str(falseChar)) } } object CfiTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param cfi_type specific cfi type * @return Vec of Strs (must be indexed to get specific char) */ def apply(cfi_type: UInt) = { val strings = Seq("----", "BR ", "JAL ", "JALR") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(cfi_type) } } object BpdTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param bpd_type specific bpd type * @return Vec of Strs (must be indexed to get specific char) */ def apply(bpd_type: UInt) = { val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(bpd_type) } } object RobTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param rob_type specific rob type * @return Vec of Strs (must be indexed to get specific char) */ def apply(rob_type: UInt) = { val strings = Seq("RST", "NML", "RBK", " WT") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(rob_type) } } object XRegToChars { /** * Get a Vec of Strs that can be used for printing * * @param xreg specific register number * @return Vec of Strs (must be indexed to get specific char) */ def apply(xreg: UInt) = { val strings = Seq(" x0", " ra", " sp", " gp", " tp", " t0", " t1", " t2", " s0", " s1", " a0", " a1", " a2", " a3", " a4", " a5", " a6", " a7", " s2", " s3", " s4", " s5", " s6", " s7", " s8", " s9", "s10", "s11", " t3", " t4", " t5", " t6") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(xreg) } } object FPRegToChars { /** * Get a Vec of Strs that can be used for printing * * @param fpreg specific register number * @return Vec of Strs (must be indexed to get specific char) */ def apply(fpreg: UInt) = { val strings = Seq(" ft0", " ft1", " ft2", " ft3", " ft4", " ft5", " ft6", " ft7", " fs0", " fs1", " fa0", " fa1", " fa2", " fa3", " fa4", " fa5", " fa6", " fa7", " fs2", " fs3", " fs4", " fs5", " fs6", " fs7", " fs8", " fs9", "fs10", "fs11", " ft8", " ft9", "ft10", "ft11") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(fpreg) } } object BoomCoreStringPrefix { /** * Add prefix to BOOM strings (currently only adds the hartId) 
* * @param strs list of strings * @return String combining the list with the prefix per line */ def apply(strs: String*)(implicit p: Parameters) = { val prefix = "[C" + s"${p(TileKey).tileId}" + "] " strs.map(str => prefix + str + "\n").mkString("") } } File execution-unit.scala: //****************************************************************************** // Copyright (c) 2013 - 2018, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // Execution Units //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // // The issue window schedules micro-ops onto a specific execution pipeline // A given execution pipeline may contain multiple functional units; one or more // read ports, and one or more writeports. package boom.v3.exu import scala.collection.mutable.{ArrayBuffer} import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.{Parameters} import freechips.rocketchip.rocket.{BP} import freechips.rocketchip.tile import FUConstants._ import boom.v3.common._ import boom.v3.ifu.{GetPCFromFtqIO} import boom.v3.util.{ImmGen, IsKilledByBranch, BranchKillableQueue, BoomCoreStringPrefix} /** * Response from Execution Unit. Bundles a MicroOp with data * * @param dataWidth width of the data coming from the execution unit */ class ExeUnitResp(val dataWidth: Int)(implicit p: Parameters) extends BoomBundle with HasBoomUOP { val data = Bits(dataWidth.W) val predicated = Bool() // Was this predicated off? val fflags = new ValidIO(new FFlagsResp) // write fflags to ROB // TODO: Do this better } /** * Floating Point flag response */ class FFlagsResp(implicit p: Parameters) extends BoomBundle { val uop = new MicroOp() val flags = Bits(tile.FPConstants.FLAGS_SZ.W) } /** * Abstract Top level Execution Unit that wraps lower level functional units to make a * multi function execution unit. 
* * @param readsIrf does this exe unit need a integer regfile port * @param writesIrf does this exe unit need a integer regfile port * @param readsFrf does this exe unit need a integer regfile port * @param writesFrf does this exe unit need a integer regfile port * @param writesLlIrf does this exe unit need a integer regfile port * @param writesLlFrf does this exe unit need a integer regfile port * @param numBypassStages number of bypass ports for the exe unit * @param dataWidth width of the data coming out of the exe unit * @param bypassable is the exe unit able to be bypassed * @param hasMem does the exe unit have a MemAddrCalcUnit * @param hasCSR does the exe unit write to the CSRFile * @param hasBrUnit does the exe unit have a branch unit * @param hasAlu does the exe unit have a alu * @param hasFpu does the exe unit have a fpu * @param hasMul does the exe unit have a multiplier * @param hasDiv does the exe unit have a divider * @param hasFdiv does the exe unit have a FP divider * @param hasIfpu does the exe unit have a int to FP unit * @param hasFpiu does the exe unit have a FP to int unit */ abstract class ExecutionUnit( val readsIrf : Boolean = false, val writesIrf : Boolean = false, val readsFrf : Boolean = false, val writesFrf : Boolean = false, val writesLlIrf : Boolean = false, val writesLlFrf : Boolean = false, val numBypassStages : Int, val dataWidth : Int, val bypassable : Boolean = false, // TODO make override def for code clarity val alwaysBypassable : Boolean = false, val hasMem : Boolean = false, val hasCSR : Boolean = false, val hasJmpUnit : Boolean = false, val hasAlu : Boolean = false, val hasFpu : Boolean = false, val hasMul : Boolean = false, val hasDiv : Boolean = false, val hasFdiv : Boolean = false, val hasIfpu : Boolean = false, val hasFpiu : Boolean = false, val hasRocc : Boolean = false )(implicit p: Parameters) extends BoomModule { val io = IO(new Bundle { val fu_types = Output(Bits(FUC_SZ.W)) val req = Flipped(new DecoupledIO(new FuncUnitReq(dataWidth))) val iresp = if (writesIrf) new DecoupledIO(new ExeUnitResp(dataWidth)) else null val fresp = if (writesFrf) new DecoupledIO(new ExeUnitResp(dataWidth)) else null val ll_iresp = if (writesLlIrf) new DecoupledIO(new ExeUnitResp(dataWidth)) else null val ll_fresp = if (writesLlFrf) new DecoupledIO(new ExeUnitResp(dataWidth)) else null val bypass = Output(Vec(numBypassStages, Valid(new ExeUnitResp(dataWidth)))) val brupdate = Input(new BrUpdateInfo()) // only used by the rocc unit val rocc = if (hasRocc) new RoCCShimCoreIO else null // only used by the branch unit val brinfo = if (hasAlu) Output(new BrResolutionInfo()) else null val get_ftq_pc = if (hasJmpUnit) Flipped(new GetPCFromFtqIO()) else null val status = Input(new freechips.rocketchip.rocket.MStatus()) // only used by the fpu unit val fcsr_rm = if (hasFcsr) Input(Bits(tile.FPConstants.RM_SZ.W)) else null // only used by the mem unit val lsu_io = if (hasMem) Flipped(new boom.v3.lsu.LSUExeIO) else null val bp = if (hasMem) Input(Vec(nBreakpoints, new BP)) else null val mcontext = if (hasMem) Input(UInt(coreParams.mcontextWidth.W)) else null val scontext = if (hasMem) Input(UInt(coreParams.scontextWidth.W)) else null // TODO move this out of ExecutionUnit val com_exception = if (hasMem || hasRocc) Input(Bool()) else null }) io.req.ready := false.B if (writesIrf) { io.iresp.valid := false.B io.iresp.bits := DontCare io.iresp.bits.fflags.valid := false.B io.iresp.bits.predicated := false.B assert(io.iresp.ready) } if (writesLlIrf) { io.ll_iresp.valid := 
false.B io.ll_iresp.bits := DontCare io.ll_iresp.bits.fflags.valid := false.B io.ll_iresp.bits.predicated := false.B } if (writesFrf) { io.fresp.valid := false.B io.fresp.bits := DontCare io.fresp.bits.fflags.valid := false.B io.fresp.bits.predicated := false.B assert(io.fresp.ready) } if (writesLlFrf) { io.ll_fresp.valid := false.B io.ll_fresp.bits := DontCare io.ll_fresp.bits.fflags.valid := false.B io.ll_fresp.bits.predicated := false.B } // TODO add "number of fflag ports", so we can properly account for FPU+Mem combinations def hasFFlags : Boolean = hasFpu || hasFdiv require ((hasFpu || hasFdiv) ^ (hasAlu || hasMul || hasMem || hasIfpu), "[execute] we no longer support mixing FP and Integer functional units in the same exe unit.") def hasFcsr = hasIfpu || hasFpu || hasFdiv require (bypassable || !alwaysBypassable, "[execute] an execution unit must be bypassable if it is always bypassable") def supportedFuncUnits = { new SupportedFuncUnits( alu = hasAlu, jmp = hasJmpUnit, mem = hasMem, muld = hasMul || hasDiv, fpu = hasFpu, csr = hasCSR, fdiv = hasFdiv, ifpu = hasIfpu) } } /** * ALU execution unit that can have a branch, alu, mul, div, int to FP, * and memory unit. * * @param hasBrUnit does the exe unit have a branch unit * @param hasCSR does the exe unit write to the CSRFile * @param hasAlu does the exe unit have a alu * @param hasMul does the exe unit have a multiplier * @param hasDiv does the exe unit have a divider * @param hasIfpu does the exe unit have a int to FP unit * @param hasMem does the exe unit have a MemAddrCalcUnit */ class ALUExeUnit( hasJmpUnit : Boolean = false, hasCSR : Boolean = false, hasAlu : Boolean = true, hasMul : Boolean = false, hasDiv : Boolean = false, hasIfpu : Boolean = false, hasMem : Boolean = false, hasRocc : Boolean = false) (implicit p: Parameters) extends ExecutionUnit( readsIrf = true, writesIrf = hasAlu || hasMul || hasDiv, writesLlIrf = hasMem || hasRocc, writesLlFrf = (hasIfpu || hasMem) && p(tile.TileKey).core.fpu != None, numBypassStages = if (hasAlu && hasMul) 3 //TODO XXX p(tile.TileKey).core.imulLatency else if (hasAlu) 1 else 0, dataWidth = 64 + 1, bypassable = hasAlu, alwaysBypassable = hasAlu && !(hasMem || hasJmpUnit || hasMul || hasDiv || hasCSR || hasIfpu || hasRocc), hasCSR = hasCSR, hasJmpUnit = hasJmpUnit, hasAlu = hasAlu, hasMul = hasMul, hasDiv = hasDiv, hasIfpu = hasIfpu, hasMem = hasMem, hasRocc = hasRocc) with freechips.rocketchip.rocket.constants.MemoryOpConstants { require(!(hasRocc && !hasCSR), "RoCC needs to be shared with CSR unit") require(!(hasMem && hasRocc), "We do not support execution unit with both Mem and Rocc writebacks") require(!(hasMem && hasIfpu), "TODO. 
Currently do not support AluMemExeUnit with FP") val out_str = BoomCoreStringPrefix("==ExeUnit==") + (if (hasAlu) BoomCoreStringPrefix(" - ALU") else "") + (if (hasMul) BoomCoreStringPrefix(" - Mul") else "") + (if (hasDiv) BoomCoreStringPrefix(" - Div") else "") + (if (hasIfpu) BoomCoreStringPrefix(" - IFPU") else "") + (if (hasMem) BoomCoreStringPrefix(" - Mem") else "") + (if (hasRocc) BoomCoreStringPrefix(" - RoCC") else "") override def toString: String = out_str.toString val div_busy = WireInit(false.B) val ifpu_busy = WireInit(false.B) // The Functional Units -------------------- // Specifically the functional units with fast writeback to IRF val iresp_fu_units = ArrayBuffer[FunctionalUnit]() io.fu_types := Mux(hasAlu.B, FU_ALU, 0.U) | Mux(hasMul.B, FU_MUL, 0.U) | Mux(!div_busy && hasDiv.B, FU_DIV, 0.U) | Mux(hasCSR.B, FU_CSR, 0.U) | Mux(hasJmpUnit.B, FU_JMP, 0.U) | Mux(!ifpu_busy && hasIfpu.B, FU_I2F, 0.U) | Mux(hasMem.B, FU_MEM, 0.U) // ALU Unit ------------------------------- var alu: ALUUnit = null if (hasAlu) { alu = Module(new ALUUnit(isJmpUnit = hasJmpUnit, numStages = numBypassStages, dataWidth = xLen)) alu.io.req.valid := ( io.req.valid && (io.req.bits.uop.fu_code === FU_ALU || io.req.bits.uop.fu_code === FU_JMP || (io.req.bits.uop.fu_code === FU_CSR && io.req.bits.uop.uopc =/= uopROCC))) //ROCC Rocc Commands are taken by the RoCC unit alu.io.req.bits.uop := io.req.bits.uop alu.io.req.bits.kill := io.req.bits.kill alu.io.req.bits.rs1_data := io.req.bits.rs1_data alu.io.req.bits.rs2_data := io.req.bits.rs2_data alu.io.req.bits.rs3_data := DontCare alu.io.req.bits.pred_data := io.req.bits.pred_data alu.io.resp.ready := DontCare alu.io.brupdate := io.brupdate iresp_fu_units += alu // Bypassing only applies to ALU io.bypass := alu.io.bypass // branch unit is embedded inside the ALU io.brinfo := alu.io.brinfo if (hasJmpUnit) { alu.io.get_ftq_pc <> io.get_ftq_pc } } var rocc: RoCCShim = null if (hasRocc) { rocc = Module(new RoCCShim) rocc.io.req.valid := io.req.valid && io.req.bits.uop.uopc === uopROCC rocc.io.req.bits := DontCare rocc.io.req.bits.uop := io.req.bits.uop rocc.io.req.bits.kill := io.req.bits.kill rocc.io.req.bits.rs1_data := io.req.bits.rs1_data rocc.io.req.bits.rs2_data := io.req.bits.rs2_data rocc.io.brupdate := io.brupdate // We should assert on this somewhere rocc.io.status := io.status rocc.io.exception := io.com_exception io.rocc <> rocc.io.core rocc.io.resp.ready := io.ll_iresp.ready io.ll_iresp.valid := rocc.io.resp.valid io.ll_iresp.bits.uop := rocc.io.resp.bits.uop io.ll_iresp.bits.data := rocc.io.resp.bits.data } // Pipelined, IMul Unit ------------------ var imul: PipelinedMulUnit = null if (hasMul) { imul = Module(new PipelinedMulUnit(imulLatency, xLen)) imul.io <> DontCare imul.io.req.valid := io.req.valid && io.req.bits.uop.fu_code_is(FU_MUL) imul.io.req.bits.uop := io.req.bits.uop imul.io.req.bits.rs1_data := io.req.bits.rs1_data imul.io.req.bits.rs2_data := io.req.bits.rs2_data imul.io.req.bits.kill := io.req.bits.kill imul.io.brupdate := io.brupdate iresp_fu_units += imul } var ifpu: IntToFPUnit = null if (hasIfpu) { ifpu = Module(new IntToFPUnit(latency=intToFpLatency)) ifpu.io.req <> io.req ifpu.io.req.valid := io.req.valid && io.req.bits.uop.fu_code_is(FU_I2F) ifpu.io.fcsr_rm := io.fcsr_rm ifpu.io.brupdate <> io.brupdate ifpu.io.resp.ready := DontCare // buffer up results since we share write-port on integer regfile. 
val queue = Module(new BranchKillableQueue(new ExeUnitResp(dataWidth), entries = intToFpLatency + 3)) // TODO being overly conservative queue.io.enq.valid := ifpu.io.resp.valid queue.io.enq.bits.uop := ifpu.io.resp.bits.uop queue.io.enq.bits.data := ifpu.io.resp.bits.data queue.io.enq.bits.predicated := ifpu.io.resp.bits.predicated queue.io.enq.bits.fflags := ifpu.io.resp.bits.fflags queue.io.brupdate := io.brupdate queue.io.flush := io.req.bits.kill io.ll_fresp <> queue.io.deq ifpu_busy := !(queue.io.empty) assert (queue.io.enq.ready) } // Div/Rem Unit ----------------------- var div: DivUnit = null val div_resp_val = WireInit(false.B) if (hasDiv) { div = Module(new DivUnit(xLen)) div.io <> DontCare div.io.req.valid := io.req.valid && io.req.bits.uop.fu_code_is(FU_DIV) && hasDiv.B div.io.req.bits.uop := io.req.bits.uop div.io.req.bits.rs1_data := io.req.bits.rs1_data div.io.req.bits.rs2_data := io.req.bits.rs2_data div.io.brupdate := io.brupdate div.io.req.bits.kill := io.req.bits.kill // share write port with the pipelined units div.io.resp.ready := !(iresp_fu_units.map(_.io.resp.valid).reduce(_|_)) div_resp_val := div.io.resp.valid div_busy := !div.io.req.ready || (io.req.valid && io.req.bits.uop.fu_code_is(FU_DIV)) iresp_fu_units += div } // Mem Unit -------------------------- if (hasMem) { require(!hasAlu) val maddrcalc = Module(new MemAddrCalcUnit) maddrcalc.io.req <> io.req maddrcalc.io.req.valid := io.req.valid && io.req.bits.uop.fu_code_is(FU_MEM) maddrcalc.io.brupdate <> io.brupdate maddrcalc.io.status := io.status maddrcalc.io.bp := io.bp maddrcalc.io.mcontext := io.mcontext maddrcalc.io.scontext := io.scontext maddrcalc.io.resp.ready := DontCare require(numBypassStages == 0) io.lsu_io.req := maddrcalc.io.resp io.ll_iresp <> io.lsu_io.iresp if (usingFPU) { io.ll_fresp <> io.lsu_io.fresp } } // Outputs (Write Port #0) --------------- if (writesIrf) { io.iresp.valid := iresp_fu_units.map(_.io.resp.valid).reduce(_|_) io.iresp.bits.uop := PriorityMux(iresp_fu_units.map(f => (f.io.resp.valid, f.io.resp.bits.uop)).toSeq) io.iresp.bits.data := PriorityMux(iresp_fu_units.map(f => (f.io.resp.valid, f.io.resp.bits.data)).toSeq) io.iresp.bits.predicated := PriorityMux(iresp_fu_units.map(f => (f.io.resp.valid, f.io.resp.bits.predicated)).toSeq) // pulled out for critical path reasons // TODO: Does this make sense as part of the iresp bundle? if (hasAlu) { io.iresp.bits.uop.csr_addr := ImmGen(alu.io.resp.bits.uop.imm_packed, IS_I).asUInt io.iresp.bits.uop.ctrl.csr_cmd := alu.io.resp.bits.uop.ctrl.csr_cmd } } assert ((PopCount(iresp_fu_units.map(_.io.resp.valid)) <= 1.U && !div_resp_val) || (PopCount(iresp_fu_units.map(_.io.resp.valid)) <= 2.U && (div_resp_val)), "Multiple functional units are fighting over the write port.") } /** * FPU-only unit, with optional second write-port for ToInt micro-ops. 
* * @param hasFpu does the exe unit have a fpu * @param hasFdiv does the exe unit have a FP divider * @param hasFpiu does the exe unit have a FP to int unit */ class FPUExeUnit( hasFpu : Boolean = true, hasFdiv : Boolean = false, hasFpiu : Boolean = false ) (implicit p: Parameters) extends ExecutionUnit( readsFrf = true, writesFrf = true, writesLlIrf = hasFpiu, writesIrf = false, numBypassStages = 0, dataWidth = p(tile.TileKey).core.fpu.get.fLen + 1, bypassable = false, hasFpu = hasFpu, hasFdiv = hasFdiv, hasFpiu = hasFpiu) with tile.HasFPUParameters { val out_str = BoomCoreStringPrefix("==ExeUnit==") (if (hasFpu) BoomCoreStringPrefix("- FPU (Latency: " + dfmaLatency + ")") else "") + (if (hasFdiv) BoomCoreStringPrefix("- FDiv/FSqrt") else "") + (if (hasFpiu) BoomCoreStringPrefix("- FPIU (writes to Integer RF)") else "") val fdiv_busy = WireInit(false.B) val fpiu_busy = WireInit(false.B) // The Functional Units -------------------- val fu_units = ArrayBuffer[FunctionalUnit]() io.fu_types := Mux(hasFpu.B, FU_FPU, 0.U) | Mux(!fdiv_busy && hasFdiv.B, FU_FDV, 0.U) | Mux(!fpiu_busy && hasFpiu.B, FU_F2I, 0.U) // FPU Unit ----------------------- var fpu: FPUUnit = null val fpu_resp_val = WireInit(false.B) val fpu_resp_fflags = Wire(new ValidIO(new FFlagsResp())) fpu_resp_fflags.valid := false.B if (hasFpu) { fpu = Module(new FPUUnit()) fpu.io.req.valid := io.req.valid && (io.req.bits.uop.fu_code_is(FU_FPU) || io.req.bits.uop.fu_code_is(FU_F2I)) // TODO move to using a separate unit fpu.io.req.bits.uop := io.req.bits.uop fpu.io.req.bits.rs1_data := io.req.bits.rs1_data fpu.io.req.bits.rs2_data := io.req.bits.rs2_data fpu.io.req.bits.rs3_data := io.req.bits.rs3_data fpu.io.req.bits.pred_data := false.B fpu.io.req.bits.kill := io.req.bits.kill fpu.io.fcsr_rm := io.fcsr_rm fpu.io.brupdate := io.brupdate fpu.io.resp.ready := DontCare fpu_resp_val := fpu.io.resp.valid fpu_resp_fflags := fpu.io.resp.bits.fflags fu_units += fpu } // FDiv/FSqrt Unit ----------------------- var fdivsqrt: FDivSqrtUnit = null val fdiv_resp_fflags = Wire(new ValidIO(new FFlagsResp())) fdiv_resp_fflags := DontCare fdiv_resp_fflags.valid := false.B if (hasFdiv) { fdivsqrt = Module(new FDivSqrtUnit()) fdivsqrt.io.req.valid := io.req.valid && io.req.bits.uop.fu_code_is(FU_FDV) fdivsqrt.io.req.bits.uop := io.req.bits.uop fdivsqrt.io.req.bits.rs1_data := io.req.bits.rs1_data fdivsqrt.io.req.bits.rs2_data := io.req.bits.rs2_data fdivsqrt.io.req.bits.rs3_data := DontCare fdivsqrt.io.req.bits.pred_data := false.B fdivsqrt.io.req.bits.kill := io.req.bits.kill fdivsqrt.io.fcsr_rm := io.fcsr_rm fdivsqrt.io.brupdate := io.brupdate // share write port with the pipelined units fdivsqrt.io.resp.ready := !(fu_units.map(_.io.resp.valid).reduce(_|_)) // TODO PERF will get blocked by fpiu. fdiv_busy := !fdivsqrt.io.req.ready || (io.req.valid && io.req.bits.uop.fu_code_is(FU_FDV)) fdiv_resp_fflags := fdivsqrt.io.resp.bits.fflags fu_units += fdivsqrt } // Outputs (Write Port #0) --------------- io.fresp.valid := fu_units.map(_.io.resp.valid).reduce(_|_) && !(fpu.io.resp.valid && fpu.io.resp.bits.uop.fu_code_is(FU_F2I)) io.fresp.bits.uop := PriorityMux(fu_units.map(f => (f.io.resp.valid, f.io.resp.bits.uop)).toSeq) io.fresp.bits.data:= PriorityMux(fu_units.map(f => (f.io.resp.valid, f.io.resp.bits.data)).toSeq) io.fresp.bits.fflags := Mux(fpu_resp_val, fpu_resp_fflags, fdiv_resp_fflags) // Outputs (Write Port #1) -- FpToInt Queuing Unit ----------------------- if (hasFpiu) { // TODO instantiate our own fpiu; and remove it from fpu.scala. 
// buffer up results since we share write-port on integer regfile. val queue = Module(new BranchKillableQueue(new ExeUnitResp(dataWidth), entries = dfmaLatency + 3)) // TODO being overly conservative queue.io.enq.valid := (fpu.io.resp.valid && fpu.io.resp.bits.uop.fu_code_is(FU_F2I) && fpu.io.resp.bits.uop.uopc =/= uopSTA) // STA means store data gen for floating point queue.io.enq.bits.uop := fpu.io.resp.bits.uop queue.io.enq.bits.data := fpu.io.resp.bits.data queue.io.enq.bits.predicated := fpu.io.resp.bits.predicated queue.io.enq.bits.fflags := fpu.io.resp.bits.fflags queue.io.brupdate := io.brupdate queue.io.flush := io.req.bits.kill assert (queue.io.enq.ready) // If this backs up, we've miscalculated the size of the queue. val fp_sdq = Module(new BranchKillableQueue(new ExeUnitResp(dataWidth), entries = 3)) // Lets us backpressure floating point store data fp_sdq.io.enq.valid := io.req.valid && io.req.bits.uop.uopc === uopSTA && !IsKilledByBranch(io.brupdate, io.req.bits.uop) fp_sdq.io.enq.bits.uop := io.req.bits.uop fp_sdq.io.enq.bits.data := ieee(io.req.bits.rs2_data) fp_sdq.io.enq.bits.predicated := false.B fp_sdq.io.enq.bits.fflags := DontCare fp_sdq.io.brupdate := io.brupdate fp_sdq.io.flush := io.req.bits.kill assert(!(fp_sdq.io.enq.valid && !fp_sdq.io.enq.ready)) val resp_arb = Module(new Arbiter(new ExeUnitResp(dataWidth), 2)) resp_arb.io.in(0) <> queue.io.deq resp_arb.io.in(1) <> fp_sdq.io.deq io.ll_iresp <> resp_arb.io.out fpiu_busy := !(queue.io.empty && fp_sdq.io.empty) } override def toString: String = out_str.toString }
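A minimal standalone sketch of the write-port-sharing idiom that both ALUExeUnit and FPUExeUnit above rely on: fixed-latency units win the shared response port through a PriorityMux, while the unpipelined divider is only allowed to drain when no pipelined unit is responding. This is an illustration only; the module and signal names below are invented and are not part of the BOOM sources or of the generated Verilog that follows.

import chisel3._
import chisel3.util._

// Illustrative only: one shared write port driven by several fixed-latency
// units plus one unpipelined (Decoupled) unit.
class SharedWritePortSketch(w: Int, nPipelined: Int) extends Module {
  require(nPipelined > 0)
  val io = IO(new Bundle {
    val pipe_resps = Flipped(Vec(nPipelined, Valid(UInt(w.W)))) // e.g. ALU, IMul
    val div_resp   = Flipped(Decoupled(UInt(w.W)))              // e.g. the iterative divider
    val out        = Valid(UInt(w.W))                           // the single shared port
  })

  val pipe_valid = io.pipe_resps.map(_.valid).reduce(_ || _)

  // The divider may only present a result when every pipelined unit is idle,
  // mirroring `div.io.resp.ready := !(iresp_fu_units.map(_.io.resp.valid).reduce(_|_))`.
  io.div_resp.ready := !pipe_valid

  io.out.valid := pipe_valid || io.div_resp.valid
  io.out.bits  := PriorityMux(io.pipe_resps.map(r => (r.valid, r.bits)) :+
                              (io.div_resp.valid, io.div_resp.bits))

  // At most one pipelined responder should win the port in any cycle
  // (cf. the PopCount assertion in ALUExeUnit above).
  assert(PopCount(io.pipe_resps.map(_.valid)) <= 1.U)
}

The same structure appears twice in the source above: once for the integer response port (ALU/IMul/Div sharing io.iresp) and once for the FP response port (FPU/FDivSqrt sharing io.fresp).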
module ALUExeUnit_2( // @[execution-unit.scala:204:7] input clock, // @[execution-unit.scala:204:7] input reset, // @[execution-unit.scala:204:7] output [9:0] io_fu_types, // @[execution-unit.scala:104:14] input io_req_valid, // @[execution-unit.scala:104:14] input [6:0] io_req_bits_uop_uopc, // @[execution-unit.scala:104:14] input [31:0] io_req_bits_uop_inst, // @[execution-unit.scala:104:14] input [31:0] io_req_bits_uop_debug_inst, // @[execution-unit.scala:104:14] input io_req_bits_uop_is_rvc, // @[execution-unit.scala:104:14] input [39:0] io_req_bits_uop_debug_pc, // @[execution-unit.scala:104:14] input [2:0] io_req_bits_uop_iq_type, // @[execution-unit.scala:104:14] input [9:0] io_req_bits_uop_fu_code, // @[execution-unit.scala:104:14] input [3:0] io_req_bits_uop_ctrl_br_type, // @[execution-unit.scala:104:14] input [1:0] io_req_bits_uop_ctrl_op1_sel, // @[execution-unit.scala:104:14] input [2:0] io_req_bits_uop_ctrl_op2_sel, // @[execution-unit.scala:104:14] input [2:0] io_req_bits_uop_ctrl_imm_sel, // @[execution-unit.scala:104:14] input [4:0] io_req_bits_uop_ctrl_op_fcn, // @[execution-unit.scala:104:14] input io_req_bits_uop_ctrl_fcn_dw, // @[execution-unit.scala:104:14] input [2:0] io_req_bits_uop_ctrl_csr_cmd, // @[execution-unit.scala:104:14] input io_req_bits_uop_ctrl_is_load, // @[execution-unit.scala:104:14] input io_req_bits_uop_ctrl_is_sta, // @[execution-unit.scala:104:14] input io_req_bits_uop_ctrl_is_std, // @[execution-unit.scala:104:14] input [1:0] io_req_bits_uop_iw_state, // @[execution-unit.scala:104:14] input io_req_bits_uop_iw_p1_poisoned, // @[execution-unit.scala:104:14] input io_req_bits_uop_iw_p2_poisoned, // @[execution-unit.scala:104:14] input io_req_bits_uop_is_br, // @[execution-unit.scala:104:14] input io_req_bits_uop_is_jalr, // @[execution-unit.scala:104:14] input io_req_bits_uop_is_jal, // @[execution-unit.scala:104:14] input io_req_bits_uop_is_sfb, // @[execution-unit.scala:104:14] input [15:0] io_req_bits_uop_br_mask, // @[execution-unit.scala:104:14] input [3:0] io_req_bits_uop_br_tag, // @[execution-unit.scala:104:14] input [4:0] io_req_bits_uop_ftq_idx, // @[execution-unit.scala:104:14] input io_req_bits_uop_edge_inst, // @[execution-unit.scala:104:14] input [5:0] io_req_bits_uop_pc_lob, // @[execution-unit.scala:104:14] input io_req_bits_uop_taken, // @[execution-unit.scala:104:14] input [19:0] io_req_bits_uop_imm_packed, // @[execution-unit.scala:104:14] input [11:0] io_req_bits_uop_csr_addr, // @[execution-unit.scala:104:14] input [6:0] io_req_bits_uop_rob_idx, // @[execution-unit.scala:104:14] input [4:0] io_req_bits_uop_ldq_idx, // @[execution-unit.scala:104:14] input [4:0] io_req_bits_uop_stq_idx, // @[execution-unit.scala:104:14] input [1:0] io_req_bits_uop_rxq_idx, // @[execution-unit.scala:104:14] input [6:0] io_req_bits_uop_pdst, // @[execution-unit.scala:104:14] input [6:0] io_req_bits_uop_prs1, // @[execution-unit.scala:104:14] input [6:0] io_req_bits_uop_prs2, // @[execution-unit.scala:104:14] input [6:0] io_req_bits_uop_prs3, // @[execution-unit.scala:104:14] input [4:0] io_req_bits_uop_ppred, // @[execution-unit.scala:104:14] input io_req_bits_uop_prs1_busy, // @[execution-unit.scala:104:14] input io_req_bits_uop_prs2_busy, // @[execution-unit.scala:104:14] input io_req_bits_uop_prs3_busy, // @[execution-unit.scala:104:14] input io_req_bits_uop_ppred_busy, // @[execution-unit.scala:104:14] input [6:0] io_req_bits_uop_stale_pdst, // @[execution-unit.scala:104:14] input io_req_bits_uop_exception, // @[execution-unit.scala:104:14] 
input [63:0] io_req_bits_uop_exc_cause, // @[execution-unit.scala:104:14] input io_req_bits_uop_bypassable, // @[execution-unit.scala:104:14] input [4:0] io_req_bits_uop_mem_cmd, // @[execution-unit.scala:104:14] input [1:0] io_req_bits_uop_mem_size, // @[execution-unit.scala:104:14] input io_req_bits_uop_mem_signed, // @[execution-unit.scala:104:14] input io_req_bits_uop_is_fence, // @[execution-unit.scala:104:14] input io_req_bits_uop_is_fencei, // @[execution-unit.scala:104:14] input io_req_bits_uop_is_amo, // @[execution-unit.scala:104:14] input io_req_bits_uop_uses_ldq, // @[execution-unit.scala:104:14] input io_req_bits_uop_uses_stq, // @[execution-unit.scala:104:14] input io_req_bits_uop_is_sys_pc2epc, // @[execution-unit.scala:104:14] input io_req_bits_uop_is_unique, // @[execution-unit.scala:104:14] input io_req_bits_uop_flush_on_commit, // @[execution-unit.scala:104:14] input io_req_bits_uop_ldst_is_rs1, // @[execution-unit.scala:104:14] input [5:0] io_req_bits_uop_ldst, // @[execution-unit.scala:104:14] input [5:0] io_req_bits_uop_lrs1, // @[execution-unit.scala:104:14] input [5:0] io_req_bits_uop_lrs2, // @[execution-unit.scala:104:14] input [5:0] io_req_bits_uop_lrs3, // @[execution-unit.scala:104:14] input io_req_bits_uop_ldst_val, // @[execution-unit.scala:104:14] input [1:0] io_req_bits_uop_dst_rtype, // @[execution-unit.scala:104:14] input [1:0] io_req_bits_uop_lrs1_rtype, // @[execution-unit.scala:104:14] input [1:0] io_req_bits_uop_lrs2_rtype, // @[execution-unit.scala:104:14] input io_req_bits_uop_frs3_en, // @[execution-unit.scala:104:14] input io_req_bits_uop_fp_val, // @[execution-unit.scala:104:14] input io_req_bits_uop_fp_single, // @[execution-unit.scala:104:14] input io_req_bits_uop_xcpt_pf_if, // @[execution-unit.scala:104:14] input io_req_bits_uop_xcpt_ae_if, // @[execution-unit.scala:104:14] input io_req_bits_uop_xcpt_ma_if, // @[execution-unit.scala:104:14] input io_req_bits_uop_bp_debug_if, // @[execution-unit.scala:104:14] input io_req_bits_uop_bp_xcpt_if, // @[execution-unit.scala:104:14] input [1:0] io_req_bits_uop_debug_fsrc, // @[execution-unit.scala:104:14] input [1:0] io_req_bits_uop_debug_tsrc, // @[execution-unit.scala:104:14] input [64:0] io_req_bits_rs1_data, // @[execution-unit.scala:104:14] input [64:0] io_req_bits_rs2_data, // @[execution-unit.scala:104:14] input io_req_bits_kill, // @[execution-unit.scala:104:14] output io_iresp_valid, // @[execution-unit.scala:104:14] output [6:0] io_iresp_bits_uop_uopc, // @[execution-unit.scala:104:14] output [31:0] io_iresp_bits_uop_inst, // @[execution-unit.scala:104:14] output [31:0] io_iresp_bits_uop_debug_inst, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_is_rvc, // @[execution-unit.scala:104:14] output [39:0] io_iresp_bits_uop_debug_pc, // @[execution-unit.scala:104:14] output [2:0] io_iresp_bits_uop_iq_type, // @[execution-unit.scala:104:14] output [9:0] io_iresp_bits_uop_fu_code, // @[execution-unit.scala:104:14] output [3:0] io_iresp_bits_uop_ctrl_br_type, // @[execution-unit.scala:104:14] output [1:0] io_iresp_bits_uop_ctrl_op1_sel, // @[execution-unit.scala:104:14] output [2:0] io_iresp_bits_uop_ctrl_op2_sel, // @[execution-unit.scala:104:14] output [2:0] io_iresp_bits_uop_ctrl_imm_sel, // @[execution-unit.scala:104:14] output [4:0] io_iresp_bits_uop_ctrl_op_fcn, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_ctrl_fcn_dw, // @[execution-unit.scala:104:14] output [2:0] io_iresp_bits_uop_ctrl_csr_cmd, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_ctrl_is_load, 
// @[execution-unit.scala:104:14] output io_iresp_bits_uop_ctrl_is_sta, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_ctrl_is_std, // @[execution-unit.scala:104:14] output [1:0] io_iresp_bits_uop_iw_state, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_iw_p1_poisoned, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_iw_p2_poisoned, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_is_br, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_is_jalr, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_is_jal, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_is_sfb, // @[execution-unit.scala:104:14] output [15:0] io_iresp_bits_uop_br_mask, // @[execution-unit.scala:104:14] output [3:0] io_iresp_bits_uop_br_tag, // @[execution-unit.scala:104:14] output [4:0] io_iresp_bits_uop_ftq_idx, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_edge_inst, // @[execution-unit.scala:104:14] output [5:0] io_iresp_bits_uop_pc_lob, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_taken, // @[execution-unit.scala:104:14] output [19:0] io_iresp_bits_uop_imm_packed, // @[execution-unit.scala:104:14] output [11:0] io_iresp_bits_uop_csr_addr, // @[execution-unit.scala:104:14] output [6:0] io_iresp_bits_uop_rob_idx, // @[execution-unit.scala:104:14] output [4:0] io_iresp_bits_uop_ldq_idx, // @[execution-unit.scala:104:14] output [4:0] io_iresp_bits_uop_stq_idx, // @[execution-unit.scala:104:14] output [1:0] io_iresp_bits_uop_rxq_idx, // @[execution-unit.scala:104:14] output [6:0] io_iresp_bits_uop_pdst, // @[execution-unit.scala:104:14] output [6:0] io_iresp_bits_uop_prs1, // @[execution-unit.scala:104:14] output [6:0] io_iresp_bits_uop_prs2, // @[execution-unit.scala:104:14] output [6:0] io_iresp_bits_uop_prs3, // @[execution-unit.scala:104:14] output [4:0] io_iresp_bits_uop_ppred, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_prs1_busy, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_prs2_busy, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_prs3_busy, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_ppred_busy, // @[execution-unit.scala:104:14] output [6:0] io_iresp_bits_uop_stale_pdst, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_exception, // @[execution-unit.scala:104:14] output [63:0] io_iresp_bits_uop_exc_cause, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_bypassable, // @[execution-unit.scala:104:14] output [4:0] io_iresp_bits_uop_mem_cmd, // @[execution-unit.scala:104:14] output [1:0] io_iresp_bits_uop_mem_size, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_mem_signed, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_is_fence, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_is_fencei, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_is_amo, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_uses_ldq, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_uses_stq, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_is_sys_pc2epc, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_is_unique, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_flush_on_commit, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_ldst_is_rs1, // @[execution-unit.scala:104:14] output [5:0] io_iresp_bits_uop_ldst, // @[execution-unit.scala:104:14] output [5:0] io_iresp_bits_uop_lrs1, // @[execution-unit.scala:104:14] output [5:0] io_iresp_bits_uop_lrs2, // @[execution-unit.scala:104:14] 
output [5:0] io_iresp_bits_uop_lrs3, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_ldst_val, // @[execution-unit.scala:104:14] output [1:0] io_iresp_bits_uop_dst_rtype, // @[execution-unit.scala:104:14] output [1:0] io_iresp_bits_uop_lrs1_rtype, // @[execution-unit.scala:104:14] output [1:0] io_iresp_bits_uop_lrs2_rtype, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_frs3_en, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_fp_val, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_fp_single, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_xcpt_pf_if, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_xcpt_ae_if, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_xcpt_ma_if, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_bp_debug_if, // @[execution-unit.scala:104:14] output io_iresp_bits_uop_bp_xcpt_if, // @[execution-unit.scala:104:14] output [1:0] io_iresp_bits_uop_debug_fsrc, // @[execution-unit.scala:104:14] output [1:0] io_iresp_bits_uop_debug_tsrc, // @[execution-unit.scala:104:14] output [64:0] io_iresp_bits_data, // @[execution-unit.scala:104:14] input io_ll_fresp_ready, // @[execution-unit.scala:104:14] output io_ll_fresp_valid, // @[execution-unit.scala:104:14] output [6:0] io_ll_fresp_bits_uop_uopc, // @[execution-unit.scala:104:14] output [31:0] io_ll_fresp_bits_uop_inst, // @[execution-unit.scala:104:14] output [31:0] io_ll_fresp_bits_uop_debug_inst, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_is_rvc, // @[execution-unit.scala:104:14] output [39:0] io_ll_fresp_bits_uop_debug_pc, // @[execution-unit.scala:104:14] output [2:0] io_ll_fresp_bits_uop_iq_type, // @[execution-unit.scala:104:14] output [9:0] io_ll_fresp_bits_uop_fu_code, // @[execution-unit.scala:104:14] output [3:0] io_ll_fresp_bits_uop_ctrl_br_type, // @[execution-unit.scala:104:14] output [1:0] io_ll_fresp_bits_uop_ctrl_op1_sel, // @[execution-unit.scala:104:14] output [2:0] io_ll_fresp_bits_uop_ctrl_op2_sel, // @[execution-unit.scala:104:14] output [2:0] io_ll_fresp_bits_uop_ctrl_imm_sel, // @[execution-unit.scala:104:14] output [4:0] io_ll_fresp_bits_uop_ctrl_op_fcn, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_ctrl_fcn_dw, // @[execution-unit.scala:104:14] output [2:0] io_ll_fresp_bits_uop_ctrl_csr_cmd, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_ctrl_is_load, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_ctrl_is_sta, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_ctrl_is_std, // @[execution-unit.scala:104:14] output [1:0] io_ll_fresp_bits_uop_iw_state, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_iw_p1_poisoned, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_iw_p2_poisoned, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_is_br, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_is_jalr, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_is_jal, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_is_sfb, // @[execution-unit.scala:104:14] output [15:0] io_ll_fresp_bits_uop_br_mask, // @[execution-unit.scala:104:14] output [3:0] io_ll_fresp_bits_uop_br_tag, // @[execution-unit.scala:104:14] output [4:0] io_ll_fresp_bits_uop_ftq_idx, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_edge_inst, // @[execution-unit.scala:104:14] output [5:0] io_ll_fresp_bits_uop_pc_lob, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_taken, // 
@[execution-unit.scala:104:14] output [19:0] io_ll_fresp_bits_uop_imm_packed, // @[execution-unit.scala:104:14] output [11:0] io_ll_fresp_bits_uop_csr_addr, // @[execution-unit.scala:104:14] output [6:0] io_ll_fresp_bits_uop_rob_idx, // @[execution-unit.scala:104:14] output [4:0] io_ll_fresp_bits_uop_ldq_idx, // @[execution-unit.scala:104:14] output [4:0] io_ll_fresp_bits_uop_stq_idx, // @[execution-unit.scala:104:14] output [1:0] io_ll_fresp_bits_uop_rxq_idx, // @[execution-unit.scala:104:14] output [6:0] io_ll_fresp_bits_uop_pdst, // @[execution-unit.scala:104:14] output [6:0] io_ll_fresp_bits_uop_prs1, // @[execution-unit.scala:104:14] output [6:0] io_ll_fresp_bits_uop_prs2, // @[execution-unit.scala:104:14] output [6:0] io_ll_fresp_bits_uop_prs3, // @[execution-unit.scala:104:14] output [4:0] io_ll_fresp_bits_uop_ppred, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_prs1_busy, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_prs2_busy, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_prs3_busy, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_ppred_busy, // @[execution-unit.scala:104:14] output [6:0] io_ll_fresp_bits_uop_stale_pdst, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_exception, // @[execution-unit.scala:104:14] output [63:0] io_ll_fresp_bits_uop_exc_cause, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_bypassable, // @[execution-unit.scala:104:14] output [4:0] io_ll_fresp_bits_uop_mem_cmd, // @[execution-unit.scala:104:14] output [1:0] io_ll_fresp_bits_uop_mem_size, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_mem_signed, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_is_fence, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_is_fencei, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_is_amo, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_uses_ldq, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_uses_stq, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_is_sys_pc2epc, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_is_unique, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_flush_on_commit, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_ldst_is_rs1, // @[execution-unit.scala:104:14] output [5:0] io_ll_fresp_bits_uop_ldst, // @[execution-unit.scala:104:14] output [5:0] io_ll_fresp_bits_uop_lrs1, // @[execution-unit.scala:104:14] output [5:0] io_ll_fresp_bits_uop_lrs2, // @[execution-unit.scala:104:14] output [5:0] io_ll_fresp_bits_uop_lrs3, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_ldst_val, // @[execution-unit.scala:104:14] output [1:0] io_ll_fresp_bits_uop_dst_rtype, // @[execution-unit.scala:104:14] output [1:0] io_ll_fresp_bits_uop_lrs1_rtype, // @[execution-unit.scala:104:14] output [1:0] io_ll_fresp_bits_uop_lrs2_rtype, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_frs3_en, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_fp_val, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_fp_single, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_xcpt_pf_if, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_xcpt_ae_if, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_xcpt_ma_if, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_bp_debug_if, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_uop_bp_xcpt_if, // @[execution-unit.scala:104:14] 
output [1:0] io_ll_fresp_bits_uop_debug_fsrc, // @[execution-unit.scala:104:14] output [1:0] io_ll_fresp_bits_uop_debug_tsrc, // @[execution-unit.scala:104:14] output [64:0] io_ll_fresp_bits_data, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_predicated, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_valid, // @[execution-unit.scala:104:14] output [6:0] io_ll_fresp_bits_fflags_bits_uop_uopc, // @[execution-unit.scala:104:14] output [31:0] io_ll_fresp_bits_fflags_bits_uop_inst, // @[execution-unit.scala:104:14] output [31:0] io_ll_fresp_bits_fflags_bits_uop_debug_inst, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_is_rvc, // @[execution-unit.scala:104:14] output [39:0] io_ll_fresp_bits_fflags_bits_uop_debug_pc, // @[execution-unit.scala:104:14] output [2:0] io_ll_fresp_bits_fflags_bits_uop_iq_type, // @[execution-unit.scala:104:14] output [9:0] io_ll_fresp_bits_fflags_bits_uop_fu_code, // @[execution-unit.scala:104:14] output [3:0] io_ll_fresp_bits_fflags_bits_uop_ctrl_br_type, // @[execution-unit.scala:104:14] output [1:0] io_ll_fresp_bits_fflags_bits_uop_ctrl_op1_sel, // @[execution-unit.scala:104:14] output [2:0] io_ll_fresp_bits_fflags_bits_uop_ctrl_op2_sel, // @[execution-unit.scala:104:14] output [2:0] io_ll_fresp_bits_fflags_bits_uop_ctrl_imm_sel, // @[execution-unit.scala:104:14] output [4:0] io_ll_fresp_bits_fflags_bits_uop_ctrl_op_fcn, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_ctrl_fcn_dw, // @[execution-unit.scala:104:14] output [2:0] io_ll_fresp_bits_fflags_bits_uop_ctrl_csr_cmd, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_ctrl_is_load, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_ctrl_is_sta, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_ctrl_is_std, // @[execution-unit.scala:104:14] output [1:0] io_ll_fresp_bits_fflags_bits_uop_iw_state, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_iw_p1_poisoned, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_iw_p2_poisoned, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_is_br, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_is_jalr, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_is_jal, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_is_sfb, // @[execution-unit.scala:104:14] output [15:0] io_ll_fresp_bits_fflags_bits_uop_br_mask, // @[execution-unit.scala:104:14] output [3:0] io_ll_fresp_bits_fflags_bits_uop_br_tag, // @[execution-unit.scala:104:14] output [4:0] io_ll_fresp_bits_fflags_bits_uop_ftq_idx, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_edge_inst, // @[execution-unit.scala:104:14] output [5:0] io_ll_fresp_bits_fflags_bits_uop_pc_lob, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_taken, // @[execution-unit.scala:104:14] output [19:0] io_ll_fresp_bits_fflags_bits_uop_imm_packed, // @[execution-unit.scala:104:14] output [11:0] io_ll_fresp_bits_fflags_bits_uop_csr_addr, // @[execution-unit.scala:104:14] output [6:0] io_ll_fresp_bits_fflags_bits_uop_rob_idx, // @[execution-unit.scala:104:14] output [4:0] io_ll_fresp_bits_fflags_bits_uop_ldq_idx, // @[execution-unit.scala:104:14] output [4:0] io_ll_fresp_bits_fflags_bits_uop_stq_idx, // @[execution-unit.scala:104:14] output [1:0] io_ll_fresp_bits_fflags_bits_uop_rxq_idx, // 
@[execution-unit.scala:104:14] output [6:0] io_ll_fresp_bits_fflags_bits_uop_pdst, // @[execution-unit.scala:104:14] output [6:0] io_ll_fresp_bits_fflags_bits_uop_prs1, // @[execution-unit.scala:104:14] output [6:0] io_ll_fresp_bits_fflags_bits_uop_prs2, // @[execution-unit.scala:104:14] output [6:0] io_ll_fresp_bits_fflags_bits_uop_prs3, // @[execution-unit.scala:104:14] output [4:0] io_ll_fresp_bits_fflags_bits_uop_ppred, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_prs1_busy, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_prs2_busy, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_prs3_busy, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_ppred_busy, // @[execution-unit.scala:104:14] output [6:0] io_ll_fresp_bits_fflags_bits_uop_stale_pdst, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_exception, // @[execution-unit.scala:104:14] output [63:0] io_ll_fresp_bits_fflags_bits_uop_exc_cause, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_bypassable, // @[execution-unit.scala:104:14] output [4:0] io_ll_fresp_bits_fflags_bits_uop_mem_cmd, // @[execution-unit.scala:104:14] output [1:0] io_ll_fresp_bits_fflags_bits_uop_mem_size, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_mem_signed, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_is_fence, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_is_fencei, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_is_amo, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_uses_ldq, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_uses_stq, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_is_sys_pc2epc, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_is_unique, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_flush_on_commit, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_ldst_is_rs1, // @[execution-unit.scala:104:14] output [5:0] io_ll_fresp_bits_fflags_bits_uop_ldst, // @[execution-unit.scala:104:14] output [5:0] io_ll_fresp_bits_fflags_bits_uop_lrs1, // @[execution-unit.scala:104:14] output [5:0] io_ll_fresp_bits_fflags_bits_uop_lrs2, // @[execution-unit.scala:104:14] output [5:0] io_ll_fresp_bits_fflags_bits_uop_lrs3, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_ldst_val, // @[execution-unit.scala:104:14] output [1:0] io_ll_fresp_bits_fflags_bits_uop_dst_rtype, // @[execution-unit.scala:104:14] output [1:0] io_ll_fresp_bits_fflags_bits_uop_lrs1_rtype, // @[execution-unit.scala:104:14] output [1:0] io_ll_fresp_bits_fflags_bits_uop_lrs2_rtype, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_frs3_en, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_fp_val, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_fp_single, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_xcpt_pf_if, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_xcpt_ae_if, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_xcpt_ma_if, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_bp_debug_if, // @[execution-unit.scala:104:14] output io_ll_fresp_bits_fflags_bits_uop_bp_xcpt_if, // 
@[execution-unit.scala:104:14] output [1:0] io_ll_fresp_bits_fflags_bits_uop_debug_fsrc, // @[execution-unit.scala:104:14] output [1:0] io_ll_fresp_bits_fflags_bits_uop_debug_tsrc, // @[execution-unit.scala:104:14] output [4:0] io_ll_fresp_bits_fflags_bits_flags, // @[execution-unit.scala:104:14] output io_bypass_0_valid, // @[execution-unit.scala:104:14] output [6:0] io_bypass_0_bits_uop_uopc, // @[execution-unit.scala:104:14] output [31:0] io_bypass_0_bits_uop_inst, // @[execution-unit.scala:104:14] output [31:0] io_bypass_0_bits_uop_debug_inst, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_is_rvc, // @[execution-unit.scala:104:14] output [39:0] io_bypass_0_bits_uop_debug_pc, // @[execution-unit.scala:104:14] output [2:0] io_bypass_0_bits_uop_iq_type, // @[execution-unit.scala:104:14] output [9:0] io_bypass_0_bits_uop_fu_code, // @[execution-unit.scala:104:14] output [3:0] io_bypass_0_bits_uop_ctrl_br_type, // @[execution-unit.scala:104:14] output [1:0] io_bypass_0_bits_uop_ctrl_op1_sel, // @[execution-unit.scala:104:14] output [2:0] io_bypass_0_bits_uop_ctrl_op2_sel, // @[execution-unit.scala:104:14] output [2:0] io_bypass_0_bits_uop_ctrl_imm_sel, // @[execution-unit.scala:104:14] output [4:0] io_bypass_0_bits_uop_ctrl_op_fcn, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_ctrl_fcn_dw, // @[execution-unit.scala:104:14] output [2:0] io_bypass_0_bits_uop_ctrl_csr_cmd, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_ctrl_is_load, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_ctrl_is_sta, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_ctrl_is_std, // @[execution-unit.scala:104:14] output [1:0] io_bypass_0_bits_uop_iw_state, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_iw_p1_poisoned, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_iw_p2_poisoned, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_is_br, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_is_jalr, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_is_jal, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_is_sfb, // @[execution-unit.scala:104:14] output [15:0] io_bypass_0_bits_uop_br_mask, // @[execution-unit.scala:104:14] output [3:0] io_bypass_0_bits_uop_br_tag, // @[execution-unit.scala:104:14] output [4:0] io_bypass_0_bits_uop_ftq_idx, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_edge_inst, // @[execution-unit.scala:104:14] output [5:0] io_bypass_0_bits_uop_pc_lob, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_taken, // @[execution-unit.scala:104:14] output [19:0] io_bypass_0_bits_uop_imm_packed, // @[execution-unit.scala:104:14] output [11:0] io_bypass_0_bits_uop_csr_addr, // @[execution-unit.scala:104:14] output [6:0] io_bypass_0_bits_uop_rob_idx, // @[execution-unit.scala:104:14] output [4:0] io_bypass_0_bits_uop_ldq_idx, // @[execution-unit.scala:104:14] output [4:0] io_bypass_0_bits_uop_stq_idx, // @[execution-unit.scala:104:14] output [1:0] io_bypass_0_bits_uop_rxq_idx, // @[execution-unit.scala:104:14] output [6:0] io_bypass_0_bits_uop_pdst, // @[execution-unit.scala:104:14] output [6:0] io_bypass_0_bits_uop_prs1, // @[execution-unit.scala:104:14] output [6:0] io_bypass_0_bits_uop_prs2, // @[execution-unit.scala:104:14] output [6:0] io_bypass_0_bits_uop_prs3, // @[execution-unit.scala:104:14] output [4:0] io_bypass_0_bits_uop_ppred, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_prs1_busy, // 
@[execution-unit.scala:104:14] output io_bypass_0_bits_uop_prs2_busy, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_prs3_busy, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_ppred_busy, // @[execution-unit.scala:104:14] output [6:0] io_bypass_0_bits_uop_stale_pdst, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_exception, // @[execution-unit.scala:104:14] output [63:0] io_bypass_0_bits_uop_exc_cause, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_bypassable, // @[execution-unit.scala:104:14] output [4:0] io_bypass_0_bits_uop_mem_cmd, // @[execution-unit.scala:104:14] output [1:0] io_bypass_0_bits_uop_mem_size, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_mem_signed, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_is_fence, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_is_fencei, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_is_amo, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_uses_ldq, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_uses_stq, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_is_sys_pc2epc, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_is_unique, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_flush_on_commit, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_ldst_is_rs1, // @[execution-unit.scala:104:14] output [5:0] io_bypass_0_bits_uop_ldst, // @[execution-unit.scala:104:14] output [5:0] io_bypass_0_bits_uop_lrs1, // @[execution-unit.scala:104:14] output [5:0] io_bypass_0_bits_uop_lrs2, // @[execution-unit.scala:104:14] output [5:0] io_bypass_0_bits_uop_lrs3, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_ldst_val, // @[execution-unit.scala:104:14] output [1:0] io_bypass_0_bits_uop_dst_rtype, // @[execution-unit.scala:104:14] output [1:0] io_bypass_0_bits_uop_lrs1_rtype, // @[execution-unit.scala:104:14] output [1:0] io_bypass_0_bits_uop_lrs2_rtype, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_frs3_en, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_fp_val, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_fp_single, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_xcpt_pf_if, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_xcpt_ae_if, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_xcpt_ma_if, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_bp_debug_if, // @[execution-unit.scala:104:14] output io_bypass_0_bits_uop_bp_xcpt_if, // @[execution-unit.scala:104:14] output [1:0] io_bypass_0_bits_uop_debug_fsrc, // @[execution-unit.scala:104:14] output [1:0] io_bypass_0_bits_uop_debug_tsrc, // @[execution-unit.scala:104:14] output [64:0] io_bypass_0_bits_data, // @[execution-unit.scala:104:14] input [15:0] io_brupdate_b1_resolve_mask, // @[execution-unit.scala:104:14] input [15:0] io_brupdate_b1_mispredict_mask, // @[execution-unit.scala:104:14] input [6:0] io_brupdate_b2_uop_uopc, // @[execution-unit.scala:104:14] input [31:0] io_brupdate_b2_uop_inst, // @[execution-unit.scala:104:14] input [31:0] io_brupdate_b2_uop_debug_inst, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_is_rvc, // @[execution-unit.scala:104:14] input [39:0] io_brupdate_b2_uop_debug_pc, // @[execution-unit.scala:104:14] input [2:0] io_brupdate_b2_uop_iq_type, // @[execution-unit.scala:104:14] input [9:0] io_brupdate_b2_uop_fu_code, // @[execution-unit.scala:104:14] input [3:0] 
io_brupdate_b2_uop_ctrl_br_type, // @[execution-unit.scala:104:14] input [1:0] io_brupdate_b2_uop_ctrl_op1_sel, // @[execution-unit.scala:104:14] input [2:0] io_brupdate_b2_uop_ctrl_op2_sel, // @[execution-unit.scala:104:14] input [2:0] io_brupdate_b2_uop_ctrl_imm_sel, // @[execution-unit.scala:104:14] input [4:0] io_brupdate_b2_uop_ctrl_op_fcn, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_ctrl_fcn_dw, // @[execution-unit.scala:104:14] input [2:0] io_brupdate_b2_uop_ctrl_csr_cmd, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_ctrl_is_load, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_ctrl_is_sta, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_ctrl_is_std, // @[execution-unit.scala:104:14] input [1:0] io_brupdate_b2_uop_iw_state, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_iw_p1_poisoned, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_iw_p2_poisoned, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_is_br, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_is_jalr, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_is_jal, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_is_sfb, // @[execution-unit.scala:104:14] input [15:0] io_brupdate_b2_uop_br_mask, // @[execution-unit.scala:104:14] input [3:0] io_brupdate_b2_uop_br_tag, // @[execution-unit.scala:104:14] input [4:0] io_brupdate_b2_uop_ftq_idx, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_edge_inst, // @[execution-unit.scala:104:14] input [5:0] io_brupdate_b2_uop_pc_lob, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_taken, // @[execution-unit.scala:104:14] input [19:0] io_brupdate_b2_uop_imm_packed, // @[execution-unit.scala:104:14] input [11:0] io_brupdate_b2_uop_csr_addr, // @[execution-unit.scala:104:14] input [6:0] io_brupdate_b2_uop_rob_idx, // @[execution-unit.scala:104:14] input [4:0] io_brupdate_b2_uop_ldq_idx, // @[execution-unit.scala:104:14] input [4:0] io_brupdate_b2_uop_stq_idx, // @[execution-unit.scala:104:14] input [1:0] io_brupdate_b2_uop_rxq_idx, // @[execution-unit.scala:104:14] input [6:0] io_brupdate_b2_uop_pdst, // @[execution-unit.scala:104:14] input [6:0] io_brupdate_b2_uop_prs1, // @[execution-unit.scala:104:14] input [6:0] io_brupdate_b2_uop_prs2, // @[execution-unit.scala:104:14] input [6:0] io_brupdate_b2_uop_prs3, // @[execution-unit.scala:104:14] input [4:0] io_brupdate_b2_uop_ppred, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_prs1_busy, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_prs2_busy, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_prs3_busy, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_ppred_busy, // @[execution-unit.scala:104:14] input [6:0] io_brupdate_b2_uop_stale_pdst, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_exception, // @[execution-unit.scala:104:14] input [63:0] io_brupdate_b2_uop_exc_cause, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_bypassable, // @[execution-unit.scala:104:14] input [4:0] io_brupdate_b2_uop_mem_cmd, // @[execution-unit.scala:104:14] input [1:0] io_brupdate_b2_uop_mem_size, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_mem_signed, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_is_fence, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_is_fencei, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_is_amo, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_uses_ldq, // @[execution-unit.scala:104:14] 
input io_brupdate_b2_uop_uses_stq, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_is_sys_pc2epc, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_is_unique, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_flush_on_commit, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_ldst_is_rs1, // @[execution-unit.scala:104:14] input [5:0] io_brupdate_b2_uop_ldst, // @[execution-unit.scala:104:14] input [5:0] io_brupdate_b2_uop_lrs1, // @[execution-unit.scala:104:14] input [5:0] io_brupdate_b2_uop_lrs2, // @[execution-unit.scala:104:14] input [5:0] io_brupdate_b2_uop_lrs3, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_ldst_val, // @[execution-unit.scala:104:14] input [1:0] io_brupdate_b2_uop_dst_rtype, // @[execution-unit.scala:104:14] input [1:0] io_brupdate_b2_uop_lrs1_rtype, // @[execution-unit.scala:104:14] input [1:0] io_brupdate_b2_uop_lrs2_rtype, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_frs3_en, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_fp_val, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_fp_single, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_xcpt_pf_if, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_xcpt_ae_if, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_xcpt_ma_if, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_bp_debug_if, // @[execution-unit.scala:104:14] input io_brupdate_b2_uop_bp_xcpt_if, // @[execution-unit.scala:104:14] input [1:0] io_brupdate_b2_uop_debug_fsrc, // @[execution-unit.scala:104:14] input [1:0] io_brupdate_b2_uop_debug_tsrc, // @[execution-unit.scala:104:14] input io_brupdate_b2_valid, // @[execution-unit.scala:104:14] input io_brupdate_b2_mispredict, // @[execution-unit.scala:104:14] input io_brupdate_b2_taken, // @[execution-unit.scala:104:14] input [2:0] io_brupdate_b2_cfi_type, // @[execution-unit.scala:104:14] input [1:0] io_brupdate_b2_pc_sel, // @[execution-unit.scala:104:14] input [39:0] io_brupdate_b2_jalr_target, // @[execution-unit.scala:104:14] input [20:0] io_brupdate_b2_target_offset, // @[execution-unit.scala:104:14] output [6:0] io_brinfo_uop_uopc, // @[execution-unit.scala:104:14] output [31:0] io_brinfo_uop_inst, // @[execution-unit.scala:104:14] output [31:0] io_brinfo_uop_debug_inst, // @[execution-unit.scala:104:14] output io_brinfo_uop_is_rvc, // @[execution-unit.scala:104:14] output [39:0] io_brinfo_uop_debug_pc, // @[execution-unit.scala:104:14] output [2:0] io_brinfo_uop_iq_type, // @[execution-unit.scala:104:14] output [9:0] io_brinfo_uop_fu_code, // @[execution-unit.scala:104:14] output [3:0] io_brinfo_uop_ctrl_br_type, // @[execution-unit.scala:104:14] output [1:0] io_brinfo_uop_ctrl_op1_sel, // @[execution-unit.scala:104:14] output [2:0] io_brinfo_uop_ctrl_op2_sel, // @[execution-unit.scala:104:14] output [2:0] io_brinfo_uop_ctrl_imm_sel, // @[execution-unit.scala:104:14] output [4:0] io_brinfo_uop_ctrl_op_fcn, // @[execution-unit.scala:104:14] output io_brinfo_uop_ctrl_fcn_dw, // @[execution-unit.scala:104:14] output [2:0] io_brinfo_uop_ctrl_csr_cmd, // @[execution-unit.scala:104:14] output io_brinfo_uop_ctrl_is_load, // @[execution-unit.scala:104:14] output io_brinfo_uop_ctrl_is_sta, // @[execution-unit.scala:104:14] output io_brinfo_uop_ctrl_is_std, // @[execution-unit.scala:104:14] output [1:0] io_brinfo_uop_iw_state, // @[execution-unit.scala:104:14] output io_brinfo_uop_iw_p1_poisoned, // @[execution-unit.scala:104:14] output io_brinfo_uop_iw_p2_poisoned, // 
@[execution-unit.scala:104:14] output io_brinfo_uop_is_br, // @[execution-unit.scala:104:14] output io_brinfo_uop_is_jalr, // @[execution-unit.scala:104:14] output io_brinfo_uop_is_jal, // @[execution-unit.scala:104:14] output io_brinfo_uop_is_sfb, // @[execution-unit.scala:104:14] output [15:0] io_brinfo_uop_br_mask, // @[execution-unit.scala:104:14] output [3:0] io_brinfo_uop_br_tag, // @[execution-unit.scala:104:14] output [4:0] io_brinfo_uop_ftq_idx, // @[execution-unit.scala:104:14] output io_brinfo_uop_edge_inst, // @[execution-unit.scala:104:14] output [5:0] io_brinfo_uop_pc_lob, // @[execution-unit.scala:104:14] output io_brinfo_uop_taken, // @[execution-unit.scala:104:14] output [19:0] io_brinfo_uop_imm_packed, // @[execution-unit.scala:104:14] output [11:0] io_brinfo_uop_csr_addr, // @[execution-unit.scala:104:14] output [6:0] io_brinfo_uop_rob_idx, // @[execution-unit.scala:104:14] output [4:0] io_brinfo_uop_ldq_idx, // @[execution-unit.scala:104:14] output [4:0] io_brinfo_uop_stq_idx, // @[execution-unit.scala:104:14] output [1:0] io_brinfo_uop_rxq_idx, // @[execution-unit.scala:104:14] output [6:0] io_brinfo_uop_pdst, // @[execution-unit.scala:104:14] output [6:0] io_brinfo_uop_prs1, // @[execution-unit.scala:104:14] output [6:0] io_brinfo_uop_prs2, // @[execution-unit.scala:104:14] output [6:0] io_brinfo_uop_prs3, // @[execution-unit.scala:104:14] output [4:0] io_brinfo_uop_ppred, // @[execution-unit.scala:104:14] output io_brinfo_uop_prs1_busy, // @[execution-unit.scala:104:14] output io_brinfo_uop_prs2_busy, // @[execution-unit.scala:104:14] output io_brinfo_uop_prs3_busy, // @[execution-unit.scala:104:14] output io_brinfo_uop_ppred_busy, // @[execution-unit.scala:104:14] output [6:0] io_brinfo_uop_stale_pdst, // @[execution-unit.scala:104:14] output io_brinfo_uop_exception, // @[execution-unit.scala:104:14] output [63:0] io_brinfo_uop_exc_cause, // @[execution-unit.scala:104:14] output io_brinfo_uop_bypassable, // @[execution-unit.scala:104:14] output [4:0] io_brinfo_uop_mem_cmd, // @[execution-unit.scala:104:14] output [1:0] io_brinfo_uop_mem_size, // @[execution-unit.scala:104:14] output io_brinfo_uop_mem_signed, // @[execution-unit.scala:104:14] output io_brinfo_uop_is_fence, // @[execution-unit.scala:104:14] output io_brinfo_uop_is_fencei, // @[execution-unit.scala:104:14] output io_brinfo_uop_is_amo, // @[execution-unit.scala:104:14] output io_brinfo_uop_uses_ldq, // @[execution-unit.scala:104:14] output io_brinfo_uop_uses_stq, // @[execution-unit.scala:104:14] output io_brinfo_uop_is_sys_pc2epc, // @[execution-unit.scala:104:14] output io_brinfo_uop_is_unique, // @[execution-unit.scala:104:14] output io_brinfo_uop_flush_on_commit, // @[execution-unit.scala:104:14] output io_brinfo_uop_ldst_is_rs1, // @[execution-unit.scala:104:14] output [5:0] io_brinfo_uop_ldst, // @[execution-unit.scala:104:14] output [5:0] io_brinfo_uop_lrs1, // @[execution-unit.scala:104:14] output [5:0] io_brinfo_uop_lrs2, // @[execution-unit.scala:104:14] output [5:0] io_brinfo_uop_lrs3, // @[execution-unit.scala:104:14] output io_brinfo_uop_ldst_val, // @[execution-unit.scala:104:14] output [1:0] io_brinfo_uop_dst_rtype, // @[execution-unit.scala:104:14] output [1:0] io_brinfo_uop_lrs1_rtype, // @[execution-unit.scala:104:14] output [1:0] io_brinfo_uop_lrs2_rtype, // @[execution-unit.scala:104:14] output io_brinfo_uop_frs3_en, // @[execution-unit.scala:104:14] output io_brinfo_uop_fp_val, // @[execution-unit.scala:104:14] output io_brinfo_uop_fp_single, // @[execution-unit.scala:104:14] output 
io_brinfo_uop_xcpt_pf_if, // @[execution-unit.scala:104:14] output io_brinfo_uop_xcpt_ae_if, // @[execution-unit.scala:104:14] output io_brinfo_uop_xcpt_ma_if, // @[execution-unit.scala:104:14] output io_brinfo_uop_bp_debug_if, // @[execution-unit.scala:104:14] output io_brinfo_uop_bp_xcpt_if, // @[execution-unit.scala:104:14] output [1:0] io_brinfo_uop_debug_fsrc, // @[execution-unit.scala:104:14] output [1:0] io_brinfo_uop_debug_tsrc, // @[execution-unit.scala:104:14] output io_brinfo_valid, // @[execution-unit.scala:104:14] output io_brinfo_mispredict, // @[execution-unit.scala:104:14] output io_brinfo_taken, // @[execution-unit.scala:104:14] output [2:0] io_brinfo_cfi_type, // @[execution-unit.scala:104:14] output [1:0] io_brinfo_pc_sel, // @[execution-unit.scala:104:14] output [20:0] io_brinfo_target_offset, // @[execution-unit.scala:104:14] input io_status_debug, // @[execution-unit.scala:104:14] input io_status_cease, // @[execution-unit.scala:104:14] input io_status_wfi, // @[execution-unit.scala:104:14] input [1:0] io_status_dprv, // @[execution-unit.scala:104:14] input io_status_dv, // @[execution-unit.scala:104:14] input [1:0] io_status_prv, // @[execution-unit.scala:104:14] input io_status_v, // @[execution-unit.scala:104:14] input io_status_sd, // @[execution-unit.scala:104:14] input io_status_mpv, // @[execution-unit.scala:104:14] input io_status_gva, // @[execution-unit.scala:104:14] input io_status_tsr, // @[execution-unit.scala:104:14] input io_status_tw, // @[execution-unit.scala:104:14] input io_status_tvm, // @[execution-unit.scala:104:14] input io_status_mxr, // @[execution-unit.scala:104:14] input io_status_sum, // @[execution-unit.scala:104:14] input io_status_mprv, // @[execution-unit.scala:104:14] input [1:0] io_status_fs, // @[execution-unit.scala:104:14] input [1:0] io_status_mpp, // @[execution-unit.scala:104:14] input io_status_spp, // @[execution-unit.scala:104:14] input io_status_mpie, // @[execution-unit.scala:104:14] input io_status_spie, // @[execution-unit.scala:104:14] input io_status_mie, // @[execution-unit.scala:104:14] input io_status_sie, // @[execution-unit.scala:104:14] input [2:0] io_fcsr_rm // @[execution-unit.scala:104:14] ); wire _queue_io_enq_ready; // @[execution-unit.scala:347:23] wire _queue_io_empty; // @[execution-unit.scala:347:23] wire _IntToFPUnit_io_resp_valid; // @[execution-unit.scala:339:18] wire [6:0] _IntToFPUnit_io_resp_bits_uop_uopc; // @[execution-unit.scala:339:18] wire [31:0] _IntToFPUnit_io_resp_bits_uop_inst; // @[execution-unit.scala:339:18] wire [31:0] _IntToFPUnit_io_resp_bits_uop_debug_inst; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_is_rvc; // @[execution-unit.scala:339:18] wire [39:0] _IntToFPUnit_io_resp_bits_uop_debug_pc; // @[execution-unit.scala:339:18] wire [2:0] _IntToFPUnit_io_resp_bits_uop_iq_type; // @[execution-unit.scala:339:18] wire [9:0] _IntToFPUnit_io_resp_bits_uop_fu_code; // @[execution-unit.scala:339:18] wire [3:0] _IntToFPUnit_io_resp_bits_uop_ctrl_br_type; // @[execution-unit.scala:339:18] wire [1:0] _IntToFPUnit_io_resp_bits_uop_ctrl_op1_sel; // @[execution-unit.scala:339:18] wire [2:0] _IntToFPUnit_io_resp_bits_uop_ctrl_op2_sel; // @[execution-unit.scala:339:18] wire [2:0] _IntToFPUnit_io_resp_bits_uop_ctrl_imm_sel; // @[execution-unit.scala:339:18] wire [4:0] _IntToFPUnit_io_resp_bits_uop_ctrl_op_fcn; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_ctrl_fcn_dw; // @[execution-unit.scala:339:18] wire [2:0] 
_IntToFPUnit_io_resp_bits_uop_ctrl_csr_cmd; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_ctrl_is_load; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_ctrl_is_sta; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_ctrl_is_std; // @[execution-unit.scala:339:18] wire [1:0] _IntToFPUnit_io_resp_bits_uop_iw_state; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_iw_p1_poisoned; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_iw_p2_poisoned; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_is_br; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_is_jalr; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_is_jal; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_is_sfb; // @[execution-unit.scala:339:18] wire [15:0] _IntToFPUnit_io_resp_bits_uop_br_mask; // @[execution-unit.scala:339:18] wire [3:0] _IntToFPUnit_io_resp_bits_uop_br_tag; // @[execution-unit.scala:339:18] wire [4:0] _IntToFPUnit_io_resp_bits_uop_ftq_idx; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_edge_inst; // @[execution-unit.scala:339:18] wire [5:0] _IntToFPUnit_io_resp_bits_uop_pc_lob; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_taken; // @[execution-unit.scala:339:18] wire [19:0] _IntToFPUnit_io_resp_bits_uop_imm_packed; // @[execution-unit.scala:339:18] wire [11:0] _IntToFPUnit_io_resp_bits_uop_csr_addr; // @[execution-unit.scala:339:18] wire [6:0] _IntToFPUnit_io_resp_bits_uop_rob_idx; // @[execution-unit.scala:339:18] wire [4:0] _IntToFPUnit_io_resp_bits_uop_ldq_idx; // @[execution-unit.scala:339:18] wire [4:0] _IntToFPUnit_io_resp_bits_uop_stq_idx; // @[execution-unit.scala:339:18] wire [1:0] _IntToFPUnit_io_resp_bits_uop_rxq_idx; // @[execution-unit.scala:339:18] wire [6:0] _IntToFPUnit_io_resp_bits_uop_pdst; // @[execution-unit.scala:339:18] wire [6:0] _IntToFPUnit_io_resp_bits_uop_prs1; // @[execution-unit.scala:339:18] wire [6:0] _IntToFPUnit_io_resp_bits_uop_prs2; // @[execution-unit.scala:339:18] wire [6:0] _IntToFPUnit_io_resp_bits_uop_prs3; // @[execution-unit.scala:339:18] wire [4:0] _IntToFPUnit_io_resp_bits_uop_ppred; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_prs1_busy; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_prs2_busy; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_prs3_busy; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_ppred_busy; // @[execution-unit.scala:339:18] wire [6:0] _IntToFPUnit_io_resp_bits_uop_stale_pdst; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_exception; // @[execution-unit.scala:339:18] wire [63:0] _IntToFPUnit_io_resp_bits_uop_exc_cause; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_bypassable; // @[execution-unit.scala:339:18] wire [4:0] _IntToFPUnit_io_resp_bits_uop_mem_cmd; // @[execution-unit.scala:339:18] wire [1:0] _IntToFPUnit_io_resp_bits_uop_mem_size; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_mem_signed; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_is_fence; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_is_fencei; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_is_amo; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_uses_ldq; // @[execution-unit.scala:339:18] wire 
_IntToFPUnit_io_resp_bits_uop_uses_stq; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_is_sys_pc2epc; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_is_unique; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_flush_on_commit; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_ldst_is_rs1; // @[execution-unit.scala:339:18] wire [5:0] _IntToFPUnit_io_resp_bits_uop_ldst; // @[execution-unit.scala:339:18] wire [5:0] _IntToFPUnit_io_resp_bits_uop_lrs1; // @[execution-unit.scala:339:18] wire [5:0] _IntToFPUnit_io_resp_bits_uop_lrs2; // @[execution-unit.scala:339:18] wire [5:0] _IntToFPUnit_io_resp_bits_uop_lrs3; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_ldst_val; // @[execution-unit.scala:339:18] wire [1:0] _IntToFPUnit_io_resp_bits_uop_dst_rtype; // @[execution-unit.scala:339:18] wire [1:0] _IntToFPUnit_io_resp_bits_uop_lrs1_rtype; // @[execution-unit.scala:339:18] wire [1:0] _IntToFPUnit_io_resp_bits_uop_lrs2_rtype; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_frs3_en; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_fp_val; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_fp_single; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_xcpt_pf_if; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_xcpt_ae_if; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_xcpt_ma_if; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_bp_debug_if; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_uop_bp_xcpt_if; // @[execution-unit.scala:339:18] wire [1:0] _IntToFPUnit_io_resp_bits_uop_debug_fsrc; // @[execution-unit.scala:339:18] wire [1:0] _IntToFPUnit_io_resp_bits_uop_debug_tsrc; // @[execution-unit.scala:339:18] wire [64:0] _IntToFPUnit_io_resp_bits_data; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_valid; // @[execution-unit.scala:339:18] wire [6:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_uopc; // @[execution-unit.scala:339:18] wire [31:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_inst; // @[execution-unit.scala:339:18] wire [31:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_debug_inst; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_is_rvc; // @[execution-unit.scala:339:18] wire [39:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_debug_pc; // @[execution-unit.scala:339:18] wire [2:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_iq_type; // @[execution-unit.scala:339:18] wire [9:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_fu_code; // @[execution-unit.scala:339:18] wire [3:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_br_type; // @[execution-unit.scala:339:18] wire [1:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_op1_sel; // @[execution-unit.scala:339:18] wire [2:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_op2_sel; // @[execution-unit.scala:339:18] wire [2:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_imm_sel; // @[execution-unit.scala:339:18] wire [4:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_op_fcn; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_fcn_dw; // @[execution-unit.scala:339:18] wire [2:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_csr_cmd; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_is_load; // @[execution-unit.scala:339:18] wire 
_IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_is_sta; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_ctrl_is_std; // @[execution-unit.scala:339:18] wire [1:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_iw_state; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_iw_p1_poisoned; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_iw_p2_poisoned; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_is_br; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_is_jalr; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_is_jal; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_is_sfb; // @[execution-unit.scala:339:18] wire [15:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_br_mask; // @[execution-unit.scala:339:18] wire [3:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_br_tag; // @[execution-unit.scala:339:18] wire [4:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_ftq_idx; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_edge_inst; // @[execution-unit.scala:339:18] wire [5:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_pc_lob; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_taken; // @[execution-unit.scala:339:18] wire [19:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_imm_packed; // @[execution-unit.scala:339:18] wire [11:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_csr_addr; // @[execution-unit.scala:339:18] wire [6:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_rob_idx; // @[execution-unit.scala:339:18] wire [4:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_ldq_idx; // @[execution-unit.scala:339:18] wire [4:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_stq_idx; // @[execution-unit.scala:339:18] wire [1:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_rxq_idx; // @[execution-unit.scala:339:18] wire [6:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_pdst; // @[execution-unit.scala:339:18] wire [6:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_prs1; // @[execution-unit.scala:339:18] wire [6:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_prs2; // @[execution-unit.scala:339:18] wire [6:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_prs3; // @[execution-unit.scala:339:18] wire [4:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_ppred; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_prs1_busy; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_prs2_busy; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_prs3_busy; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_ppred_busy; // @[execution-unit.scala:339:18] wire [6:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_stale_pdst; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_exception; // @[execution-unit.scala:339:18] wire [63:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_exc_cause; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_bypassable; // @[execution-unit.scala:339:18] wire [4:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_mem_cmd; // @[execution-unit.scala:339:18] wire [1:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_mem_size; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_mem_signed; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_is_fence; // 
@[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_is_fencei; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_is_amo; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_uses_ldq; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_uses_stq; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_is_sys_pc2epc; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_is_unique; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_flush_on_commit; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_ldst_is_rs1; // @[execution-unit.scala:339:18] wire [5:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_ldst; // @[execution-unit.scala:339:18] wire [5:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_lrs1; // @[execution-unit.scala:339:18] wire [5:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_lrs2; // @[execution-unit.scala:339:18] wire [5:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_lrs3; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_ldst_val; // @[execution-unit.scala:339:18] wire [1:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_dst_rtype; // @[execution-unit.scala:339:18] wire [1:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_lrs1_rtype; // @[execution-unit.scala:339:18] wire [1:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_lrs2_rtype; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_frs3_en; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_fp_val; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_fp_single; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_xcpt_pf_if; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_xcpt_ae_if; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_xcpt_ma_if; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_bp_debug_if; // @[execution-unit.scala:339:18] wire _IntToFPUnit_io_resp_bits_fflags_bits_uop_bp_xcpt_if; // @[execution-unit.scala:339:18] wire [1:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_debug_fsrc; // @[execution-unit.scala:339:18] wire [1:0] _IntToFPUnit_io_resp_bits_fflags_bits_uop_debug_tsrc; // @[execution-unit.scala:339:18] wire [4:0] _IntToFPUnit_io_resp_bits_fflags_bits_flags; // @[execution-unit.scala:339:18] wire [19:0] _ALUUnit_io_resp_bits_uop_imm_packed; // @[execution-unit.scala:271:17] wire [63:0] _ALUUnit_io_resp_bits_data; // @[execution-unit.scala:271:17] wire [63:0] _ALUUnit_io_bypass_0_bits_data; // @[execution-unit.scala:271:17] wire io_req_valid_0 = io_req_valid; // @[execution-unit.scala:204:7] wire [6:0] io_req_bits_uop_uopc_0 = io_req_bits_uop_uopc; // @[execution-unit.scala:204:7] wire [31:0] io_req_bits_uop_inst_0 = io_req_bits_uop_inst; // @[execution-unit.scala:204:7] wire [31:0] io_req_bits_uop_debug_inst_0 = io_req_bits_uop_debug_inst; // @[execution-unit.scala:204:7] wire io_req_bits_uop_is_rvc_0 = io_req_bits_uop_is_rvc; // @[execution-unit.scala:204:7] wire [39:0] io_req_bits_uop_debug_pc_0 = io_req_bits_uop_debug_pc; // @[execution-unit.scala:204:7] wire [2:0] io_req_bits_uop_iq_type_0 = io_req_bits_uop_iq_type; // @[execution-unit.scala:204:7] wire [9:0] io_req_bits_uop_fu_code_0 = io_req_bits_uop_fu_code; // @[execution-unit.scala:204:7] wire [3:0] 
io_req_bits_uop_ctrl_br_type_0 = io_req_bits_uop_ctrl_br_type; // @[execution-unit.scala:204:7] wire [1:0] io_req_bits_uop_ctrl_op1_sel_0 = io_req_bits_uop_ctrl_op1_sel; // @[execution-unit.scala:204:7] wire [2:0] io_req_bits_uop_ctrl_op2_sel_0 = io_req_bits_uop_ctrl_op2_sel; // @[execution-unit.scala:204:7] wire [2:0] io_req_bits_uop_ctrl_imm_sel_0 = io_req_bits_uop_ctrl_imm_sel; // @[execution-unit.scala:204:7] wire [4:0] io_req_bits_uop_ctrl_op_fcn_0 = io_req_bits_uop_ctrl_op_fcn; // @[execution-unit.scala:204:7] wire io_req_bits_uop_ctrl_fcn_dw_0 = io_req_bits_uop_ctrl_fcn_dw; // @[execution-unit.scala:204:7] wire [2:0] io_req_bits_uop_ctrl_csr_cmd_0 = io_req_bits_uop_ctrl_csr_cmd; // @[execution-unit.scala:204:7] wire io_req_bits_uop_ctrl_is_load_0 = io_req_bits_uop_ctrl_is_load; // @[execution-unit.scala:204:7] wire io_req_bits_uop_ctrl_is_sta_0 = io_req_bits_uop_ctrl_is_sta; // @[execution-unit.scala:204:7] wire io_req_bits_uop_ctrl_is_std_0 = io_req_bits_uop_ctrl_is_std; // @[execution-unit.scala:204:7] wire [1:0] io_req_bits_uop_iw_state_0 = io_req_bits_uop_iw_state; // @[execution-unit.scala:204:7] wire io_req_bits_uop_iw_p1_poisoned_0 = io_req_bits_uop_iw_p1_poisoned; // @[execution-unit.scala:204:7] wire io_req_bits_uop_iw_p2_poisoned_0 = io_req_bits_uop_iw_p2_poisoned; // @[execution-unit.scala:204:7] wire io_req_bits_uop_is_br_0 = io_req_bits_uop_is_br; // @[execution-unit.scala:204:7] wire io_req_bits_uop_is_jalr_0 = io_req_bits_uop_is_jalr; // @[execution-unit.scala:204:7] wire io_req_bits_uop_is_jal_0 = io_req_bits_uop_is_jal; // @[execution-unit.scala:204:7] wire io_req_bits_uop_is_sfb_0 = io_req_bits_uop_is_sfb; // @[execution-unit.scala:204:7] wire [15:0] io_req_bits_uop_br_mask_0 = io_req_bits_uop_br_mask; // @[execution-unit.scala:204:7] wire [3:0] io_req_bits_uop_br_tag_0 = io_req_bits_uop_br_tag; // @[execution-unit.scala:204:7] wire [4:0] io_req_bits_uop_ftq_idx_0 = io_req_bits_uop_ftq_idx; // @[execution-unit.scala:204:7] wire io_req_bits_uop_edge_inst_0 = io_req_bits_uop_edge_inst; // @[execution-unit.scala:204:7] wire [5:0] io_req_bits_uop_pc_lob_0 = io_req_bits_uop_pc_lob; // @[execution-unit.scala:204:7] wire io_req_bits_uop_taken_0 = io_req_bits_uop_taken; // @[execution-unit.scala:204:7] wire [19:0] io_req_bits_uop_imm_packed_0 = io_req_bits_uop_imm_packed; // @[execution-unit.scala:204:7] wire [11:0] io_req_bits_uop_csr_addr_0 = io_req_bits_uop_csr_addr; // @[execution-unit.scala:204:7] wire [6:0] io_req_bits_uop_rob_idx_0 = io_req_bits_uop_rob_idx; // @[execution-unit.scala:204:7] wire [4:0] io_req_bits_uop_ldq_idx_0 = io_req_bits_uop_ldq_idx; // @[execution-unit.scala:204:7] wire [4:0] io_req_bits_uop_stq_idx_0 = io_req_bits_uop_stq_idx; // @[execution-unit.scala:204:7] wire [1:0] io_req_bits_uop_rxq_idx_0 = io_req_bits_uop_rxq_idx; // @[execution-unit.scala:204:7] wire [6:0] io_req_bits_uop_pdst_0 = io_req_bits_uop_pdst; // @[execution-unit.scala:204:7] wire [6:0] io_req_bits_uop_prs1_0 = io_req_bits_uop_prs1; // @[execution-unit.scala:204:7] wire [6:0] io_req_bits_uop_prs2_0 = io_req_bits_uop_prs2; // @[execution-unit.scala:204:7] wire [6:0] io_req_bits_uop_prs3_0 = io_req_bits_uop_prs3; // @[execution-unit.scala:204:7] wire [4:0] io_req_bits_uop_ppred_0 = io_req_bits_uop_ppred; // @[execution-unit.scala:204:7] wire io_req_bits_uop_prs1_busy_0 = io_req_bits_uop_prs1_busy; // @[execution-unit.scala:204:7] wire io_req_bits_uop_prs2_busy_0 = io_req_bits_uop_prs2_busy; // @[execution-unit.scala:204:7] wire io_req_bits_uop_prs3_busy_0 = 
io_req_bits_uop_prs3_busy; // @[execution-unit.scala:204:7] wire io_req_bits_uop_ppred_busy_0 = io_req_bits_uop_ppred_busy; // @[execution-unit.scala:204:7] wire [6:0] io_req_bits_uop_stale_pdst_0 = io_req_bits_uop_stale_pdst; // @[execution-unit.scala:204:7] wire io_req_bits_uop_exception_0 = io_req_bits_uop_exception; // @[execution-unit.scala:204:7] wire [63:0] io_req_bits_uop_exc_cause_0 = io_req_bits_uop_exc_cause; // @[execution-unit.scala:204:7] wire io_req_bits_uop_bypassable_0 = io_req_bits_uop_bypassable; // @[execution-unit.scala:204:7] wire [4:0] io_req_bits_uop_mem_cmd_0 = io_req_bits_uop_mem_cmd; // @[execution-unit.scala:204:7] wire [1:0] io_req_bits_uop_mem_size_0 = io_req_bits_uop_mem_size; // @[execution-unit.scala:204:7] wire io_req_bits_uop_mem_signed_0 = io_req_bits_uop_mem_signed; // @[execution-unit.scala:204:7] wire io_req_bits_uop_is_fence_0 = io_req_bits_uop_is_fence; // @[execution-unit.scala:204:7] wire io_req_bits_uop_is_fencei_0 = io_req_bits_uop_is_fencei; // @[execution-unit.scala:204:7] wire io_req_bits_uop_is_amo_0 = io_req_bits_uop_is_amo; // @[execution-unit.scala:204:7] wire io_req_bits_uop_uses_ldq_0 = io_req_bits_uop_uses_ldq; // @[execution-unit.scala:204:7] wire io_req_bits_uop_uses_stq_0 = io_req_bits_uop_uses_stq; // @[execution-unit.scala:204:7] wire io_req_bits_uop_is_sys_pc2epc_0 = io_req_bits_uop_is_sys_pc2epc; // @[execution-unit.scala:204:7] wire io_req_bits_uop_is_unique_0 = io_req_bits_uop_is_unique; // @[execution-unit.scala:204:7] wire io_req_bits_uop_flush_on_commit_0 = io_req_bits_uop_flush_on_commit; // @[execution-unit.scala:204:7] wire io_req_bits_uop_ldst_is_rs1_0 = io_req_bits_uop_ldst_is_rs1; // @[execution-unit.scala:204:7] wire [5:0] io_req_bits_uop_ldst_0 = io_req_bits_uop_ldst; // @[execution-unit.scala:204:7] wire [5:0] io_req_bits_uop_lrs1_0 = io_req_bits_uop_lrs1; // @[execution-unit.scala:204:7] wire [5:0] io_req_bits_uop_lrs2_0 = io_req_bits_uop_lrs2; // @[execution-unit.scala:204:7] wire [5:0] io_req_bits_uop_lrs3_0 = io_req_bits_uop_lrs3; // @[execution-unit.scala:204:7] wire io_req_bits_uop_ldst_val_0 = io_req_bits_uop_ldst_val; // @[execution-unit.scala:204:7] wire [1:0] io_req_bits_uop_dst_rtype_0 = io_req_bits_uop_dst_rtype; // @[execution-unit.scala:204:7] wire [1:0] io_req_bits_uop_lrs1_rtype_0 = io_req_bits_uop_lrs1_rtype; // @[execution-unit.scala:204:7] wire [1:0] io_req_bits_uop_lrs2_rtype_0 = io_req_bits_uop_lrs2_rtype; // @[execution-unit.scala:204:7] wire io_req_bits_uop_frs3_en_0 = io_req_bits_uop_frs3_en; // @[execution-unit.scala:204:7] wire io_req_bits_uop_fp_val_0 = io_req_bits_uop_fp_val; // @[execution-unit.scala:204:7] wire io_req_bits_uop_fp_single_0 = io_req_bits_uop_fp_single; // @[execution-unit.scala:204:7] wire io_req_bits_uop_xcpt_pf_if_0 = io_req_bits_uop_xcpt_pf_if; // @[execution-unit.scala:204:7] wire io_req_bits_uop_xcpt_ae_if_0 = io_req_bits_uop_xcpt_ae_if; // @[execution-unit.scala:204:7] wire io_req_bits_uop_xcpt_ma_if_0 = io_req_bits_uop_xcpt_ma_if; // @[execution-unit.scala:204:7] wire io_req_bits_uop_bp_debug_if_0 = io_req_bits_uop_bp_debug_if; // @[execution-unit.scala:204:7] wire io_req_bits_uop_bp_xcpt_if_0 = io_req_bits_uop_bp_xcpt_if; // @[execution-unit.scala:204:7] wire [1:0] io_req_bits_uop_debug_fsrc_0 = io_req_bits_uop_debug_fsrc; // @[execution-unit.scala:204:7] wire [1:0] io_req_bits_uop_debug_tsrc_0 = io_req_bits_uop_debug_tsrc; // @[execution-unit.scala:204:7] wire [64:0] io_req_bits_rs1_data_0 = io_req_bits_rs1_data; // @[execution-unit.scala:204:7] wire [64:0] 
io_req_bits_rs2_data_0 = io_req_bits_rs2_data; // @[execution-unit.scala:204:7] wire io_req_bits_kill_0 = io_req_bits_kill; // @[execution-unit.scala:204:7] wire io_ll_fresp_ready_0 = io_ll_fresp_ready; // @[execution-unit.scala:204:7] wire [15:0] io_brupdate_b1_resolve_mask_0 = io_brupdate_b1_resolve_mask; // @[execution-unit.scala:204:7] wire [15:0] io_brupdate_b1_mispredict_mask_0 = io_brupdate_b1_mispredict_mask; // @[execution-unit.scala:204:7] wire [6:0] io_brupdate_b2_uop_uopc_0 = io_brupdate_b2_uop_uopc; // @[execution-unit.scala:204:7] wire [31:0] io_brupdate_b2_uop_inst_0 = io_brupdate_b2_uop_inst; // @[execution-unit.scala:204:7] wire [31:0] io_brupdate_b2_uop_debug_inst_0 = io_brupdate_b2_uop_debug_inst; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_is_rvc_0 = io_brupdate_b2_uop_is_rvc; // @[execution-unit.scala:204:7] wire [39:0] io_brupdate_b2_uop_debug_pc_0 = io_brupdate_b2_uop_debug_pc; // @[execution-unit.scala:204:7] wire [2:0] io_brupdate_b2_uop_iq_type_0 = io_brupdate_b2_uop_iq_type; // @[execution-unit.scala:204:7] wire [9:0] io_brupdate_b2_uop_fu_code_0 = io_brupdate_b2_uop_fu_code; // @[execution-unit.scala:204:7] wire [3:0] io_brupdate_b2_uop_ctrl_br_type_0 = io_brupdate_b2_uop_ctrl_br_type; // @[execution-unit.scala:204:7] wire [1:0] io_brupdate_b2_uop_ctrl_op1_sel_0 = io_brupdate_b2_uop_ctrl_op1_sel; // @[execution-unit.scala:204:7] wire [2:0] io_brupdate_b2_uop_ctrl_op2_sel_0 = io_brupdate_b2_uop_ctrl_op2_sel; // @[execution-unit.scala:204:7] wire [2:0] io_brupdate_b2_uop_ctrl_imm_sel_0 = io_brupdate_b2_uop_ctrl_imm_sel; // @[execution-unit.scala:204:7] wire [4:0] io_brupdate_b2_uop_ctrl_op_fcn_0 = io_brupdate_b2_uop_ctrl_op_fcn; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_ctrl_fcn_dw_0 = io_brupdate_b2_uop_ctrl_fcn_dw; // @[execution-unit.scala:204:7] wire [2:0] io_brupdate_b2_uop_ctrl_csr_cmd_0 = io_brupdate_b2_uop_ctrl_csr_cmd; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_ctrl_is_load_0 = io_brupdate_b2_uop_ctrl_is_load; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_ctrl_is_sta_0 = io_brupdate_b2_uop_ctrl_is_sta; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_ctrl_is_std_0 = io_brupdate_b2_uop_ctrl_is_std; // @[execution-unit.scala:204:7] wire [1:0] io_brupdate_b2_uop_iw_state_0 = io_brupdate_b2_uop_iw_state; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_iw_p1_poisoned_0 = io_brupdate_b2_uop_iw_p1_poisoned; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_iw_p2_poisoned_0 = io_brupdate_b2_uop_iw_p2_poisoned; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_is_br_0 = io_brupdate_b2_uop_is_br; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_is_jalr_0 = io_brupdate_b2_uop_is_jalr; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_is_jal_0 = io_brupdate_b2_uop_is_jal; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_is_sfb_0 = io_brupdate_b2_uop_is_sfb; // @[execution-unit.scala:204:7] wire [15:0] io_brupdate_b2_uop_br_mask_0 = io_brupdate_b2_uop_br_mask; // @[execution-unit.scala:204:7] wire [3:0] io_brupdate_b2_uop_br_tag_0 = io_brupdate_b2_uop_br_tag; // @[execution-unit.scala:204:7] wire [4:0] io_brupdate_b2_uop_ftq_idx_0 = io_brupdate_b2_uop_ftq_idx; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_edge_inst_0 = io_brupdate_b2_uop_edge_inst; // @[execution-unit.scala:204:7] wire [5:0] io_brupdate_b2_uop_pc_lob_0 = io_brupdate_b2_uop_pc_lob; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_taken_0 = io_brupdate_b2_uop_taken; // 
@[execution-unit.scala:204:7] wire [19:0] io_brupdate_b2_uop_imm_packed_0 = io_brupdate_b2_uop_imm_packed; // @[execution-unit.scala:204:7] wire [11:0] io_brupdate_b2_uop_csr_addr_0 = io_brupdate_b2_uop_csr_addr; // @[execution-unit.scala:204:7] wire [6:0] io_brupdate_b2_uop_rob_idx_0 = io_brupdate_b2_uop_rob_idx; // @[execution-unit.scala:204:7] wire [4:0] io_brupdate_b2_uop_ldq_idx_0 = io_brupdate_b2_uop_ldq_idx; // @[execution-unit.scala:204:7] wire [4:0] io_brupdate_b2_uop_stq_idx_0 = io_brupdate_b2_uop_stq_idx; // @[execution-unit.scala:204:7] wire [1:0] io_brupdate_b2_uop_rxq_idx_0 = io_brupdate_b2_uop_rxq_idx; // @[execution-unit.scala:204:7] wire [6:0] io_brupdate_b2_uop_pdst_0 = io_brupdate_b2_uop_pdst; // @[execution-unit.scala:204:7] wire [6:0] io_brupdate_b2_uop_prs1_0 = io_brupdate_b2_uop_prs1; // @[execution-unit.scala:204:7] wire [6:0] io_brupdate_b2_uop_prs2_0 = io_brupdate_b2_uop_prs2; // @[execution-unit.scala:204:7] wire [6:0] io_brupdate_b2_uop_prs3_0 = io_brupdate_b2_uop_prs3; // @[execution-unit.scala:204:7] wire [4:0] io_brupdate_b2_uop_ppred_0 = io_brupdate_b2_uop_ppred; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_prs1_busy_0 = io_brupdate_b2_uop_prs1_busy; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_prs2_busy_0 = io_brupdate_b2_uop_prs2_busy; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_prs3_busy_0 = io_brupdate_b2_uop_prs3_busy; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_ppred_busy_0 = io_brupdate_b2_uop_ppred_busy; // @[execution-unit.scala:204:7] wire [6:0] io_brupdate_b2_uop_stale_pdst_0 = io_brupdate_b2_uop_stale_pdst; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_exception_0 = io_brupdate_b2_uop_exception; // @[execution-unit.scala:204:7] wire [63:0] io_brupdate_b2_uop_exc_cause_0 = io_brupdate_b2_uop_exc_cause; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_bypassable_0 = io_brupdate_b2_uop_bypassable; // @[execution-unit.scala:204:7] wire [4:0] io_brupdate_b2_uop_mem_cmd_0 = io_brupdate_b2_uop_mem_cmd; // @[execution-unit.scala:204:7] wire [1:0] io_brupdate_b2_uop_mem_size_0 = io_brupdate_b2_uop_mem_size; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_mem_signed_0 = io_brupdate_b2_uop_mem_signed; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_is_fence_0 = io_brupdate_b2_uop_is_fence; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_is_fencei_0 = io_brupdate_b2_uop_is_fencei; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_is_amo_0 = io_brupdate_b2_uop_is_amo; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_uses_ldq_0 = io_brupdate_b2_uop_uses_ldq; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_uses_stq_0 = io_brupdate_b2_uop_uses_stq; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_is_sys_pc2epc_0 = io_brupdate_b2_uop_is_sys_pc2epc; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_is_unique_0 = io_brupdate_b2_uop_is_unique; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_flush_on_commit_0 = io_brupdate_b2_uop_flush_on_commit; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_ldst_is_rs1_0 = io_brupdate_b2_uop_ldst_is_rs1; // @[execution-unit.scala:204:7] wire [5:0] io_brupdate_b2_uop_ldst_0 = io_brupdate_b2_uop_ldst; // @[execution-unit.scala:204:7] wire [5:0] io_brupdate_b2_uop_lrs1_0 = io_brupdate_b2_uop_lrs1; // @[execution-unit.scala:204:7] wire [5:0] io_brupdate_b2_uop_lrs2_0 = io_brupdate_b2_uop_lrs2; // @[execution-unit.scala:204:7] wire [5:0] io_brupdate_b2_uop_lrs3_0 = 
io_brupdate_b2_uop_lrs3; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_ldst_val_0 = io_brupdate_b2_uop_ldst_val; // @[execution-unit.scala:204:7] wire [1:0] io_brupdate_b2_uop_dst_rtype_0 = io_brupdate_b2_uop_dst_rtype; // @[execution-unit.scala:204:7] wire [1:0] io_brupdate_b2_uop_lrs1_rtype_0 = io_brupdate_b2_uop_lrs1_rtype; // @[execution-unit.scala:204:7] wire [1:0] io_brupdate_b2_uop_lrs2_rtype_0 = io_brupdate_b2_uop_lrs2_rtype; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_frs3_en_0 = io_brupdate_b2_uop_frs3_en; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_fp_val_0 = io_brupdate_b2_uop_fp_val; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_fp_single_0 = io_brupdate_b2_uop_fp_single; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_xcpt_pf_if_0 = io_brupdate_b2_uop_xcpt_pf_if; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_xcpt_ae_if_0 = io_brupdate_b2_uop_xcpt_ae_if; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_xcpt_ma_if_0 = io_brupdate_b2_uop_xcpt_ma_if; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_bp_debug_if_0 = io_brupdate_b2_uop_bp_debug_if; // @[execution-unit.scala:204:7] wire io_brupdate_b2_uop_bp_xcpt_if_0 = io_brupdate_b2_uop_bp_xcpt_if; // @[execution-unit.scala:204:7] wire [1:0] io_brupdate_b2_uop_debug_fsrc_0 = io_brupdate_b2_uop_debug_fsrc; // @[execution-unit.scala:204:7] wire [1:0] io_brupdate_b2_uop_debug_tsrc_0 = io_brupdate_b2_uop_debug_tsrc; // @[execution-unit.scala:204:7] wire io_brupdate_b2_valid_0 = io_brupdate_b2_valid; // @[execution-unit.scala:204:7] wire io_brupdate_b2_mispredict_0 = io_brupdate_b2_mispredict; // @[execution-unit.scala:204:7] wire io_brupdate_b2_taken_0 = io_brupdate_b2_taken; // @[execution-unit.scala:204:7] wire [2:0] io_brupdate_b2_cfi_type_0 = io_brupdate_b2_cfi_type; // @[execution-unit.scala:204:7] wire [1:0] io_brupdate_b2_pc_sel_0 = io_brupdate_b2_pc_sel; // @[execution-unit.scala:204:7] wire [39:0] io_brupdate_b2_jalr_target_0 = io_brupdate_b2_jalr_target; // @[execution-unit.scala:204:7] wire [20:0] io_brupdate_b2_target_offset_0 = io_brupdate_b2_target_offset; // @[execution-unit.scala:204:7] wire io_status_debug_0 = io_status_debug; // @[execution-unit.scala:204:7] wire io_status_cease_0 = io_status_cease; // @[execution-unit.scala:204:7] wire io_status_wfi_0 = io_status_wfi; // @[execution-unit.scala:204:7] wire [1:0] io_status_dprv_0 = io_status_dprv; // @[execution-unit.scala:204:7] wire io_status_dv_0 = io_status_dv; // @[execution-unit.scala:204:7] wire [1:0] io_status_prv_0 = io_status_prv; // @[execution-unit.scala:204:7] wire io_status_v_0 = io_status_v; // @[execution-unit.scala:204:7] wire io_status_sd_0 = io_status_sd; // @[execution-unit.scala:204:7] wire io_status_mpv_0 = io_status_mpv; // @[execution-unit.scala:204:7] wire io_status_gva_0 = io_status_gva; // @[execution-unit.scala:204:7] wire io_status_tsr_0 = io_status_tsr; // @[execution-unit.scala:204:7] wire io_status_tw_0 = io_status_tw; // @[execution-unit.scala:204:7] wire io_status_tvm_0 = io_status_tvm; // @[execution-unit.scala:204:7] wire io_status_mxr_0 = io_status_mxr; // @[execution-unit.scala:204:7] wire io_status_sum_0 = io_status_sum; // @[execution-unit.scala:204:7] wire io_status_mprv_0 = io_status_mprv; // @[execution-unit.scala:204:7] wire [1:0] io_status_fs_0 = io_status_fs; // @[execution-unit.scala:204:7] wire [1:0] io_status_mpp_0 = io_status_mpp; // @[execution-unit.scala:204:7] wire io_status_spp_0 = io_status_spp; // @[execution-unit.scala:204:7] wire 
io_status_mpie_0 = io_status_mpie; // @[execution-unit.scala:204:7] wire io_status_spie_0 = io_status_spie; // @[execution-unit.scala:204:7] wire io_status_mie_0 = io_status_mie; // @[execution-unit.scala:204:7] wire io_status_sie_0 = io_status_sie; // @[execution-unit.scala:204:7] wire [2:0] io_fcsr_rm_0 = io_fcsr_rm; // @[execution-unit.scala:204:7] wire io_req_bits_pred_data = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_predicated = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_valid = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_is_rvc = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_ctrl_fcn_dw = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_ctrl_is_load = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_ctrl_is_sta = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_ctrl_is_std = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_iw_p1_poisoned = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_iw_p2_poisoned = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_is_br = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_is_jalr = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_is_jal = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_is_sfb = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_edge_inst = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_taken = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_prs1_busy = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_prs2_busy = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_prs3_busy = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_ppred_busy = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_exception = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_bypassable = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_mem_signed = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_is_fence = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_is_fencei = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_is_amo = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_uses_ldq = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_uses_stq = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_is_sys_pc2epc = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_is_unique = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_flush_on_commit = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_ldst_is_rs1 = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_ldst_val = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_frs3_en = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_fp_val = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_fp_single = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_xcpt_pf_if = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_xcpt_ae_if = 1'h0; // 
@[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_xcpt_ma_if = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_bp_debug_if = 1'h0; // @[execution-unit.scala:204:7] wire io_iresp_bits_fflags_bits_uop_bp_xcpt_if = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_predicated = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_valid = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_is_rvc = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_ctrl_fcn_dw = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_ctrl_is_load = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_ctrl_is_sta = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_ctrl_is_std = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_iw_p1_poisoned = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_iw_p2_poisoned = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_is_br = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_is_jalr = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_is_jal = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_is_sfb = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_edge_inst = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_taken = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_prs1_busy = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_prs2_busy = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_prs3_busy = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_ppred_busy = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_exception = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_bypassable = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_mem_signed = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_is_fence = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_is_fencei = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_is_amo = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_uses_ldq = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_uses_stq = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_is_sys_pc2epc = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_is_unique = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_flush_on_commit = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_ldst_is_rs1 = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_ldst_val = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_frs3_en = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_fp_val = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_fp_single = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_xcpt_pf_if = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_xcpt_ae_if = 1'h0; // 
@[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_xcpt_ma_if = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_bp_debug_if = 1'h0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_fflags_bits_uop_bp_xcpt_if = 1'h0; // @[execution-unit.scala:204:7] wire io_status_mbe = 1'h0; // @[execution-unit.scala:204:7] wire io_status_sbe = 1'h0; // @[execution-unit.scala:204:7] wire io_status_sd_rv32 = 1'h0; // @[execution-unit.scala:204:7] wire io_status_ube = 1'h0; // @[execution-unit.scala:204:7] wire io_status_upie = 1'h0; // @[execution-unit.scala:204:7] wire io_status_hie = 1'h0; // @[execution-unit.scala:204:7] wire io_status_uie = 1'h0; // @[execution-unit.scala:204:7] wire div_busy = 1'h0; // @[execution-unit.scala:253:27] wire _io_fu_types_T_4 = 1'h0; // @[execution-unit.scala:262:32] wire div_resp_val = 1'h0; // @[execution-unit.scala:364:30] wire _io_iresp_bits_uop_csr_addr_i30_20_T = 1'h0; // @[util.scala:274:27] wire _io_iresp_bits_uop_csr_addr_i19_12_T = 1'h0; // @[util.scala:275:27] wire _io_iresp_bits_uop_csr_addr_i19_12_T_1 = 1'h0; // @[util.scala:275:44] wire _io_iresp_bits_uop_csr_addr_i19_12_T_2 = 1'h0; // @[util.scala:275:36] wire _io_iresp_bits_uop_csr_addr_i11_T = 1'h0; // @[util.scala:276:27] wire _io_iresp_bits_uop_csr_addr_i11_T_1 = 1'h0; // @[util.scala:277:27] wire _io_iresp_bits_uop_csr_addr_i11_T_2 = 1'h0; // @[util.scala:277:44] wire _io_iresp_bits_uop_csr_addr_i11_T_3 = 1'h0; // @[util.scala:277:36] wire _io_iresp_bits_uop_csr_addr_i10_5_T = 1'h0; // @[util.scala:278:27] wire _io_iresp_bits_uop_csr_addr_i4_1_T = 1'h0; // @[util.scala:279:27] wire _io_iresp_bits_uop_csr_addr_i0_T = 1'h0; // @[util.scala:280:27] wire [31:0] io_status_isa = 32'h14112D; // @[execution-unit.scala:204:7] wire [22:0] io_status_zero2 = 23'h0; // @[execution-unit.scala:204:7] wire [7:0] io_status_zero1 = 8'h0; // @[execution-unit.scala:204:7] wire [1:0] io_iresp_bits_fflags_bits_uop_ctrl_op1_sel = 2'h0; // @[execution-unit.scala:204:7] wire [1:0] io_iresp_bits_fflags_bits_uop_iw_state = 2'h0; // @[execution-unit.scala:204:7] wire [1:0] io_iresp_bits_fflags_bits_uop_rxq_idx = 2'h0; // @[execution-unit.scala:204:7] wire [1:0] io_iresp_bits_fflags_bits_uop_mem_size = 2'h0; // @[execution-unit.scala:204:7] wire [1:0] io_iresp_bits_fflags_bits_uop_dst_rtype = 2'h0; // @[execution-unit.scala:204:7] wire [1:0] io_iresp_bits_fflags_bits_uop_lrs1_rtype = 2'h0; // @[execution-unit.scala:204:7] wire [1:0] io_iresp_bits_fflags_bits_uop_lrs2_rtype = 2'h0; // @[execution-unit.scala:204:7] wire [1:0] io_iresp_bits_fflags_bits_uop_debug_fsrc = 2'h0; // @[execution-unit.scala:204:7] wire [1:0] io_iresp_bits_fflags_bits_uop_debug_tsrc = 2'h0; // @[execution-unit.scala:204:7] wire [1:0] io_bypass_0_bits_fflags_bits_uop_ctrl_op1_sel = 2'h0; // @[execution-unit.scala:204:7] wire [1:0] io_bypass_0_bits_fflags_bits_uop_iw_state = 2'h0; // @[execution-unit.scala:204:7] wire [1:0] io_bypass_0_bits_fflags_bits_uop_rxq_idx = 2'h0; // @[execution-unit.scala:204:7] wire [1:0] io_bypass_0_bits_fflags_bits_uop_mem_size = 2'h0; // @[execution-unit.scala:204:7] wire [1:0] io_bypass_0_bits_fflags_bits_uop_dst_rtype = 2'h0; // @[execution-unit.scala:204:7] wire [1:0] io_bypass_0_bits_fflags_bits_uop_lrs1_rtype = 2'h0; // @[execution-unit.scala:204:7] wire [1:0] io_bypass_0_bits_fflags_bits_uop_lrs2_rtype = 2'h0; // @[execution-unit.scala:204:7] wire [1:0] io_bypass_0_bits_fflags_bits_uop_debug_fsrc = 2'h0; // @[execution-unit.scala:204:7] wire [1:0] 
io_bypass_0_bits_fflags_bits_uop_debug_tsrc = 2'h0; // @[execution-unit.scala:204:7] wire [1:0] io_status_xs = 2'h0; // @[execution-unit.scala:204:7] wire [1:0] io_status_vs = 2'h0; // @[execution-unit.scala:204:7] wire io_req_ready = 1'h1; // @[execution-unit.scala:204:7] wire io_iresp_ready = 1'h1; // @[execution-unit.scala:204:7] wire _io_fu_types_T_3 = 1'h1; // @[execution-unit.scala:262:22] wire _io_iresp_bits_uop_csr_addr_i0_T_1 = 1'h1; // @[util.scala:280:44] wire _io_iresp_bits_uop_csr_addr_i0_T_2 = 1'h1; // @[util.scala:280:36] wire [64:0] io_req_bits_rs3_data = 65'h0; // @[execution-unit.scala:204:7] wire [6:0] io_iresp_bits_fflags_bits_uop_uopc = 7'h0; // @[execution-unit.scala:204:7] wire [6:0] io_iresp_bits_fflags_bits_uop_rob_idx = 7'h0; // @[execution-unit.scala:204:7] wire [6:0] io_iresp_bits_fflags_bits_uop_pdst = 7'h0; // @[execution-unit.scala:204:7] wire [6:0] io_iresp_bits_fflags_bits_uop_prs1 = 7'h0; // @[execution-unit.scala:204:7] wire [6:0] io_iresp_bits_fflags_bits_uop_prs2 = 7'h0; // @[execution-unit.scala:204:7] wire [6:0] io_iresp_bits_fflags_bits_uop_prs3 = 7'h0; // @[execution-unit.scala:204:7] wire [6:0] io_iresp_bits_fflags_bits_uop_stale_pdst = 7'h0; // @[execution-unit.scala:204:7] wire [6:0] io_bypass_0_bits_fflags_bits_uop_uopc = 7'h0; // @[execution-unit.scala:204:7] wire [6:0] io_bypass_0_bits_fflags_bits_uop_rob_idx = 7'h0; // @[execution-unit.scala:204:7] wire [6:0] io_bypass_0_bits_fflags_bits_uop_pdst = 7'h0; // @[execution-unit.scala:204:7] wire [6:0] io_bypass_0_bits_fflags_bits_uop_prs1 = 7'h0; // @[execution-unit.scala:204:7] wire [6:0] io_bypass_0_bits_fflags_bits_uop_prs2 = 7'h0; // @[execution-unit.scala:204:7] wire [6:0] io_bypass_0_bits_fflags_bits_uop_prs3 = 7'h0; // @[execution-unit.scala:204:7] wire [6:0] io_bypass_0_bits_fflags_bits_uop_stale_pdst = 7'h0; // @[execution-unit.scala:204:7] wire [31:0] io_iresp_bits_fflags_bits_uop_inst = 32'h0; // @[execution-unit.scala:204:7] wire [31:0] io_iresp_bits_fflags_bits_uop_debug_inst = 32'h0; // @[execution-unit.scala:204:7] wire [31:0] io_bypass_0_bits_fflags_bits_uop_inst = 32'h0; // @[execution-unit.scala:204:7] wire [31:0] io_bypass_0_bits_fflags_bits_uop_debug_inst = 32'h0; // @[execution-unit.scala:204:7] wire [39:0] io_iresp_bits_fflags_bits_uop_debug_pc = 40'h0; // @[execution-unit.scala:204:7] wire [39:0] io_bypass_0_bits_fflags_bits_uop_debug_pc = 40'h0; // @[execution-unit.scala:204:7] wire [39:0] io_brinfo_jalr_target = 40'h0; // @[execution-unit.scala:204:7] wire [2:0] io_iresp_bits_fflags_bits_uop_iq_type = 3'h0; // @[execution-unit.scala:204:7] wire [2:0] io_iresp_bits_fflags_bits_uop_ctrl_op2_sel = 3'h0; // @[execution-unit.scala:204:7] wire [2:0] io_iresp_bits_fflags_bits_uop_ctrl_imm_sel = 3'h0; // @[execution-unit.scala:204:7] wire [2:0] io_iresp_bits_fflags_bits_uop_ctrl_csr_cmd = 3'h0; // @[execution-unit.scala:204:7] wire [2:0] io_bypass_0_bits_fflags_bits_uop_iq_type = 3'h0; // @[execution-unit.scala:204:7] wire [2:0] io_bypass_0_bits_fflags_bits_uop_ctrl_op2_sel = 3'h0; // @[execution-unit.scala:204:7] wire [2:0] io_bypass_0_bits_fflags_bits_uop_ctrl_imm_sel = 3'h0; // @[execution-unit.scala:204:7] wire [2:0] io_bypass_0_bits_fflags_bits_uop_ctrl_csr_cmd = 3'h0; // @[execution-unit.scala:204:7] wire [9:0] io_iresp_bits_fflags_bits_uop_fu_code = 10'h0; // @[execution-unit.scala:204:7] wire [9:0] io_bypass_0_bits_fflags_bits_uop_fu_code = 10'h0; // @[execution-unit.scala:204:7] wire [9:0] _io_fu_types_T_1 = 10'h0; // @[execution-unit.scala:261:21] wire [9:0] 
_io_fu_types_T_5 = 10'h0; // @[execution-unit.scala:262:21] wire [9:0] _io_fu_types_T_9 = 10'h0; // @[execution-unit.scala:264:21] wire [9:0] _io_fu_types_T_15 = 10'h0; // @[execution-unit.scala:266:21] wire [3:0] io_iresp_bits_fflags_bits_uop_ctrl_br_type = 4'h0; // @[execution-unit.scala:204:7] wire [3:0] io_iresp_bits_fflags_bits_uop_br_tag = 4'h0; // @[execution-unit.scala:204:7] wire [3:0] io_bypass_0_bits_fflags_bits_uop_ctrl_br_type = 4'h0; // @[execution-unit.scala:204:7] wire [3:0] io_bypass_0_bits_fflags_bits_uop_br_tag = 4'h0; // @[execution-unit.scala:204:7] wire [4:0] io_iresp_bits_fflags_bits_uop_ctrl_op_fcn = 5'h0; // @[execution-unit.scala:204:7] wire [4:0] io_iresp_bits_fflags_bits_uop_ftq_idx = 5'h0; // @[execution-unit.scala:204:7] wire [4:0] io_iresp_bits_fflags_bits_uop_ldq_idx = 5'h0; // @[execution-unit.scala:204:7] wire [4:0] io_iresp_bits_fflags_bits_uop_stq_idx = 5'h0; // @[execution-unit.scala:204:7] wire [4:0] io_iresp_bits_fflags_bits_uop_ppred = 5'h0; // @[execution-unit.scala:204:7] wire [4:0] io_iresp_bits_fflags_bits_uop_mem_cmd = 5'h0; // @[execution-unit.scala:204:7] wire [4:0] io_iresp_bits_fflags_bits_flags = 5'h0; // @[execution-unit.scala:204:7] wire [4:0] io_bypass_0_bits_fflags_bits_uop_ctrl_op_fcn = 5'h0; // @[execution-unit.scala:204:7] wire [4:0] io_bypass_0_bits_fflags_bits_uop_ftq_idx = 5'h0; // @[execution-unit.scala:204:7] wire [4:0] io_bypass_0_bits_fflags_bits_uop_ldq_idx = 5'h0; // @[execution-unit.scala:204:7] wire [4:0] io_bypass_0_bits_fflags_bits_uop_stq_idx = 5'h0; // @[execution-unit.scala:204:7] wire [4:0] io_bypass_0_bits_fflags_bits_uop_ppred = 5'h0; // @[execution-unit.scala:204:7] wire [4:0] io_bypass_0_bits_fflags_bits_uop_mem_cmd = 5'h0; // @[execution-unit.scala:204:7] wire [4:0] io_bypass_0_bits_fflags_bits_flags = 5'h0; // @[execution-unit.scala:204:7] wire [15:0] io_iresp_bits_fflags_bits_uop_br_mask = 16'h0; // @[execution-unit.scala:204:7] wire [15:0] io_bypass_0_bits_fflags_bits_uop_br_mask = 16'h0; // @[execution-unit.scala:204:7] wire [5:0] io_iresp_bits_fflags_bits_uop_pc_lob = 6'h0; // @[execution-unit.scala:204:7] wire [5:0] io_iresp_bits_fflags_bits_uop_ldst = 6'h0; // @[execution-unit.scala:204:7] wire [5:0] io_iresp_bits_fflags_bits_uop_lrs1 = 6'h0; // @[execution-unit.scala:204:7] wire [5:0] io_iresp_bits_fflags_bits_uop_lrs2 = 6'h0; // @[execution-unit.scala:204:7] wire [5:0] io_iresp_bits_fflags_bits_uop_lrs3 = 6'h0; // @[execution-unit.scala:204:7] wire [5:0] io_bypass_0_bits_fflags_bits_uop_pc_lob = 6'h0; // @[execution-unit.scala:204:7] wire [5:0] io_bypass_0_bits_fflags_bits_uop_ldst = 6'h0; // @[execution-unit.scala:204:7] wire [5:0] io_bypass_0_bits_fflags_bits_uop_lrs1 = 6'h0; // @[execution-unit.scala:204:7] wire [5:0] io_bypass_0_bits_fflags_bits_uop_lrs2 = 6'h0; // @[execution-unit.scala:204:7] wire [5:0] io_bypass_0_bits_fflags_bits_uop_lrs3 = 6'h0; // @[execution-unit.scala:204:7] wire [19:0] io_iresp_bits_fflags_bits_uop_imm_packed = 20'h0; // @[execution-unit.scala:204:7] wire [19:0] io_bypass_0_bits_fflags_bits_uop_imm_packed = 20'h0; // @[execution-unit.scala:204:7] wire [11:0] io_iresp_bits_fflags_bits_uop_csr_addr = 12'h0; // @[execution-unit.scala:204:7] wire [11:0] io_bypass_0_bits_fflags_bits_uop_csr_addr = 12'h0; // @[execution-unit.scala:204:7] wire [63:0] io_iresp_bits_fflags_bits_uop_exc_cause = 64'h0; // @[execution-unit.scala:204:7] wire [63:0] io_bypass_0_bits_fflags_bits_uop_exc_cause = 64'h0; // @[execution-unit.scala:204:7] wire [1:0] io_status_sxl = 2'h2; // 
@[execution-unit.scala:204:7] wire [1:0] io_status_uxl = 2'h2; // @[execution-unit.scala:204:7] wire [9:0] _io_fu_types_T_8 = 10'h21; // @[execution-unit.scala:262:58] wire [9:0] _io_fu_types_T_10 = 10'h21; // @[execution-unit.scala:263:45] wire [9:0] _io_fu_types_T = 10'h1; // @[execution-unit.scala:260:21] wire [9:0] _io_fu_types_T_2 = 10'h1; // @[execution-unit.scala:260:45] wire [9:0] _io_fu_types_T_6 = 10'h1; // @[execution-unit.scala:261:45] wire [9:0] _io_fu_types_T_7 = 10'h20; // @[execution-unit.scala:263:21] wire [9:0] _io_fu_types_T_16; // @[execution-unit.scala:265:60] wire [3:0] io_iresp_bits_uop_ctrl_br_type_0; // @[execution-unit.scala:204:7] wire [1:0] io_iresp_bits_uop_ctrl_op1_sel_0; // @[execution-unit.scala:204:7] wire [2:0] io_iresp_bits_uop_ctrl_op2_sel_0; // @[execution-unit.scala:204:7] wire [2:0] io_iresp_bits_uop_ctrl_imm_sel_0; // @[execution-unit.scala:204:7] wire [4:0] io_iresp_bits_uop_ctrl_op_fcn_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_ctrl_fcn_dw_0; // @[execution-unit.scala:204:7] wire [2:0] io_iresp_bits_uop_ctrl_csr_cmd_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_ctrl_is_load_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_ctrl_is_sta_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_ctrl_is_std_0; // @[execution-unit.scala:204:7] wire [6:0] io_iresp_bits_uop_uopc_0; // @[execution-unit.scala:204:7] wire [31:0] io_iresp_bits_uop_inst_0; // @[execution-unit.scala:204:7] wire [31:0] io_iresp_bits_uop_debug_inst_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_is_rvc_0; // @[execution-unit.scala:204:7] wire [39:0] io_iresp_bits_uop_debug_pc_0; // @[execution-unit.scala:204:7] wire [2:0] io_iresp_bits_uop_iq_type_0; // @[execution-unit.scala:204:7] wire [9:0] io_iresp_bits_uop_fu_code_0; // @[execution-unit.scala:204:7] wire [1:0] io_iresp_bits_uop_iw_state_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_iw_p1_poisoned_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_iw_p2_poisoned_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_is_br_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_is_jalr_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_is_jal_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_is_sfb_0; // @[execution-unit.scala:204:7] wire [15:0] io_iresp_bits_uop_br_mask_0; // @[execution-unit.scala:204:7] wire [3:0] io_iresp_bits_uop_br_tag_0; // @[execution-unit.scala:204:7] wire [4:0] io_iresp_bits_uop_ftq_idx_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_edge_inst_0; // @[execution-unit.scala:204:7] wire [5:0] io_iresp_bits_uop_pc_lob_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_taken_0; // @[execution-unit.scala:204:7] wire [19:0] io_iresp_bits_uop_imm_packed_0; // @[execution-unit.scala:204:7] wire [11:0] io_iresp_bits_uop_csr_addr_0; // @[execution-unit.scala:204:7] wire [6:0] io_iresp_bits_uop_rob_idx_0; // @[execution-unit.scala:204:7] wire [4:0] io_iresp_bits_uop_ldq_idx_0; // @[execution-unit.scala:204:7] wire [4:0] io_iresp_bits_uop_stq_idx_0; // @[execution-unit.scala:204:7] wire [1:0] io_iresp_bits_uop_rxq_idx_0; // @[execution-unit.scala:204:7] wire [6:0] io_iresp_bits_uop_pdst_0; // @[execution-unit.scala:204:7] wire [6:0] io_iresp_bits_uop_prs1_0; // @[execution-unit.scala:204:7] wire [6:0] io_iresp_bits_uop_prs2_0; // @[execution-unit.scala:204:7] wire [6:0] io_iresp_bits_uop_prs3_0; // @[execution-unit.scala:204:7] wire [4:0] io_iresp_bits_uop_ppred_0; // @[execution-unit.scala:204:7] 
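// Orientation note (editor's assumption, not present in the generated output): the constant
// assignments above tie off unused fflags/status fields of this execution unit, while the
// io_iresp_*, io_ll_fresp_*, and io_bypass_0_* wires declared in this region are the module's
// output nets, presumably driven further down in the module body from the functional-unit responses.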
wire io_iresp_bits_uop_prs1_busy_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_prs2_busy_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_prs3_busy_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_ppred_busy_0; // @[execution-unit.scala:204:7] wire [6:0] io_iresp_bits_uop_stale_pdst_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_exception_0; // @[execution-unit.scala:204:7] wire [63:0] io_iresp_bits_uop_exc_cause_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_bypassable_0; // @[execution-unit.scala:204:7] wire [4:0] io_iresp_bits_uop_mem_cmd_0; // @[execution-unit.scala:204:7] wire [1:0] io_iresp_bits_uop_mem_size_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_mem_signed_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_is_fence_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_is_fencei_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_is_amo_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_uses_ldq_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_uses_stq_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_is_sys_pc2epc_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_is_unique_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_flush_on_commit_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_ldst_is_rs1_0; // @[execution-unit.scala:204:7] wire [5:0] io_iresp_bits_uop_ldst_0; // @[execution-unit.scala:204:7] wire [5:0] io_iresp_bits_uop_lrs1_0; // @[execution-unit.scala:204:7] wire [5:0] io_iresp_bits_uop_lrs2_0; // @[execution-unit.scala:204:7] wire [5:0] io_iresp_bits_uop_lrs3_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_ldst_val_0; // @[execution-unit.scala:204:7] wire [1:0] io_iresp_bits_uop_dst_rtype_0; // @[execution-unit.scala:204:7] wire [1:0] io_iresp_bits_uop_lrs1_rtype_0; // @[execution-unit.scala:204:7] wire [1:0] io_iresp_bits_uop_lrs2_rtype_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_frs3_en_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_fp_val_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_fp_single_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_xcpt_pf_if_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_xcpt_ae_if_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_xcpt_ma_if_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_bp_debug_if_0; // @[execution-unit.scala:204:7] wire io_iresp_bits_uop_bp_xcpt_if_0; // @[execution-unit.scala:204:7] wire [1:0] io_iresp_bits_uop_debug_fsrc_0; // @[execution-unit.scala:204:7] wire [1:0] io_iresp_bits_uop_debug_tsrc_0; // @[execution-unit.scala:204:7] wire [64:0] io_iresp_bits_data_0; // @[execution-unit.scala:204:7] wire io_iresp_valid_0; // @[execution-unit.scala:204:7] wire [3:0] io_ll_fresp_bits_uop_ctrl_br_type_0; // @[execution-unit.scala:204:7] wire [1:0] io_ll_fresp_bits_uop_ctrl_op1_sel_0; // @[execution-unit.scala:204:7] wire [2:0] io_ll_fresp_bits_uop_ctrl_op2_sel_0; // @[execution-unit.scala:204:7] wire [2:0] io_ll_fresp_bits_uop_ctrl_imm_sel_0; // @[execution-unit.scala:204:7] wire [4:0] io_ll_fresp_bits_uop_ctrl_op_fcn_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_uop_ctrl_fcn_dw_0; // @[execution-unit.scala:204:7] wire [2:0] io_ll_fresp_bits_uop_ctrl_csr_cmd_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_uop_ctrl_is_load_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_uop_ctrl_is_sta_0; // @[execution-unit.scala:204:7] wire 
io_ll_fresp_bits_uop_ctrl_is_std_0; // @[execution-unit.scala:204:7] wire [6:0] io_ll_fresp_bits_uop_uopc_0; // @[execution-unit.scala:204:7] wire [31:0] io_ll_fresp_bits_uop_inst_0; // @[execution-unit.scala:204:7] wire [31:0] io_ll_fresp_bits_uop_debug_inst_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_uop_is_rvc_0; // @[execution-unit.scala:204:7] wire [39:0] io_ll_fresp_bits_uop_debug_pc_0; // @[execution-unit.scala:204:7] wire [2:0] io_ll_fresp_bits_uop_iq_type_0; // @[execution-unit.scala:204:7] wire [9:0] io_ll_fresp_bits_uop_fu_code_0; // @[execution-unit.scala:204:7] wire [1:0] io_ll_fresp_bits_uop_iw_state_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_uop_iw_p1_poisoned_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_uop_iw_p2_poisoned_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_uop_is_br_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_uop_is_jalr_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_uop_is_jal_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_uop_is_sfb_0; // @[execution-unit.scala:204:7] wire [15:0] io_ll_fresp_bits_uop_br_mask_0; // @[execution-unit.scala:204:7] wire [3:0] io_ll_fresp_bits_uop_br_tag_0; // @[execution-unit.scala:204:7] wire [4:0] io_ll_fresp_bits_uop_ftq_idx_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_uop_edge_inst_0; // @[execution-unit.scala:204:7] wire [5:0] io_ll_fresp_bits_uop_pc_lob_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_uop_taken_0; // @[execution-unit.scala:204:7] wire [19:0] io_ll_fresp_bits_uop_imm_packed_0; // @[execution-unit.scala:204:7] wire [11:0] io_ll_fresp_bits_uop_csr_addr_0; // @[execution-unit.scala:204:7] wire [6:0] io_ll_fresp_bits_uop_rob_idx_0; // @[execution-unit.scala:204:7] wire [4:0] io_ll_fresp_bits_uop_ldq_idx_0; // @[execution-unit.scala:204:7] wire [4:0] io_ll_fresp_bits_uop_stq_idx_0; // @[execution-unit.scala:204:7] wire [1:0] io_ll_fresp_bits_uop_rxq_idx_0; // @[execution-unit.scala:204:7] wire [6:0] io_ll_fresp_bits_uop_pdst_0; // @[execution-unit.scala:204:7] wire [6:0] io_ll_fresp_bits_uop_prs1_0; // @[execution-unit.scala:204:7] wire [6:0] io_ll_fresp_bits_uop_prs2_0; // @[execution-unit.scala:204:7] wire [6:0] io_ll_fresp_bits_uop_prs3_0; // @[execution-unit.scala:204:7] wire [4:0] io_ll_fresp_bits_uop_ppred_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_uop_prs1_busy_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_uop_prs2_busy_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_uop_prs3_busy_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_uop_ppred_busy_0; // @[execution-unit.scala:204:7] wire [6:0] io_ll_fresp_bits_uop_stale_pdst_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_uop_exception_0; // @[execution-unit.scala:204:7] wire [63:0] io_ll_fresp_bits_uop_exc_cause_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_uop_bypassable_0; // @[execution-unit.scala:204:7] wire [4:0] io_ll_fresp_bits_uop_mem_cmd_0; // @[execution-unit.scala:204:7] wire [1:0] io_ll_fresp_bits_uop_mem_size_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_uop_mem_signed_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_uop_is_fence_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_uop_is_fencei_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_uop_is_amo_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_uop_uses_ldq_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_uop_uses_stq_0; // @[execution-unit.scala:204:7] wire 
io_ll_fresp_bits_uop_is_sys_pc2epc_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_uop_is_unique_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_uop_flush_on_commit_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_uop_ldst_is_rs1_0; // @[execution-unit.scala:204:7] wire [5:0] io_ll_fresp_bits_uop_ldst_0; // @[execution-unit.scala:204:7] wire [5:0] io_ll_fresp_bits_uop_lrs1_0; // @[execution-unit.scala:204:7] wire [5:0] io_ll_fresp_bits_uop_lrs2_0; // @[execution-unit.scala:204:7] wire [5:0] io_ll_fresp_bits_uop_lrs3_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_uop_ldst_val_0; // @[execution-unit.scala:204:7] wire [1:0] io_ll_fresp_bits_uop_dst_rtype_0; // @[execution-unit.scala:204:7] wire [1:0] io_ll_fresp_bits_uop_lrs1_rtype_0; // @[execution-unit.scala:204:7] wire [1:0] io_ll_fresp_bits_uop_lrs2_rtype_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_uop_frs3_en_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_uop_fp_val_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_uop_fp_single_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_uop_xcpt_pf_if_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_uop_xcpt_ae_if_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_uop_xcpt_ma_if_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_uop_bp_debug_if_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_uop_bp_xcpt_if_0; // @[execution-unit.scala:204:7] wire [1:0] io_ll_fresp_bits_uop_debug_fsrc_0; // @[execution-unit.scala:204:7] wire [1:0] io_ll_fresp_bits_uop_debug_tsrc_0; // @[execution-unit.scala:204:7] wire [3:0] io_ll_fresp_bits_fflags_bits_uop_ctrl_br_type_0; // @[execution-unit.scala:204:7] wire [1:0] io_ll_fresp_bits_fflags_bits_uop_ctrl_op1_sel_0; // @[execution-unit.scala:204:7] wire [2:0] io_ll_fresp_bits_fflags_bits_uop_ctrl_op2_sel_0; // @[execution-unit.scala:204:7] wire [2:0] io_ll_fresp_bits_fflags_bits_uop_ctrl_imm_sel_0; // @[execution-unit.scala:204:7] wire [4:0] io_ll_fresp_bits_fflags_bits_uop_ctrl_op_fcn_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_ctrl_fcn_dw_0; // @[execution-unit.scala:204:7] wire [2:0] io_ll_fresp_bits_fflags_bits_uop_ctrl_csr_cmd_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_ctrl_is_load_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_ctrl_is_sta_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_ctrl_is_std_0; // @[execution-unit.scala:204:7] wire [6:0] io_ll_fresp_bits_fflags_bits_uop_uopc_0; // @[execution-unit.scala:204:7] wire [31:0] io_ll_fresp_bits_fflags_bits_uop_inst_0; // @[execution-unit.scala:204:7] wire [31:0] io_ll_fresp_bits_fflags_bits_uop_debug_inst_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_is_rvc_0; // @[execution-unit.scala:204:7] wire [39:0] io_ll_fresp_bits_fflags_bits_uop_debug_pc_0; // @[execution-unit.scala:204:7] wire [2:0] io_ll_fresp_bits_fflags_bits_uop_iq_type_0; // @[execution-unit.scala:204:7] wire [9:0] io_ll_fresp_bits_fflags_bits_uop_fu_code_0; // @[execution-unit.scala:204:7] wire [1:0] io_ll_fresp_bits_fflags_bits_uop_iw_state_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_iw_p1_poisoned_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_iw_p2_poisoned_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_is_br_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_is_jalr_0; // 
@[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_is_jal_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_is_sfb_0; // @[execution-unit.scala:204:7] wire [15:0] io_ll_fresp_bits_fflags_bits_uop_br_mask_0; // @[execution-unit.scala:204:7] wire [3:0] io_ll_fresp_bits_fflags_bits_uop_br_tag_0; // @[execution-unit.scala:204:7] wire [4:0] io_ll_fresp_bits_fflags_bits_uop_ftq_idx_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_edge_inst_0; // @[execution-unit.scala:204:7] wire [5:0] io_ll_fresp_bits_fflags_bits_uop_pc_lob_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_taken_0; // @[execution-unit.scala:204:7] wire [19:0] io_ll_fresp_bits_fflags_bits_uop_imm_packed_0; // @[execution-unit.scala:204:7] wire [11:0] io_ll_fresp_bits_fflags_bits_uop_csr_addr_0; // @[execution-unit.scala:204:7] wire [6:0] io_ll_fresp_bits_fflags_bits_uop_rob_idx_0; // @[execution-unit.scala:204:7] wire [4:0] io_ll_fresp_bits_fflags_bits_uop_ldq_idx_0; // @[execution-unit.scala:204:7] wire [4:0] io_ll_fresp_bits_fflags_bits_uop_stq_idx_0; // @[execution-unit.scala:204:7] wire [1:0] io_ll_fresp_bits_fflags_bits_uop_rxq_idx_0; // @[execution-unit.scala:204:7] wire [6:0] io_ll_fresp_bits_fflags_bits_uop_pdst_0; // @[execution-unit.scala:204:7] wire [6:0] io_ll_fresp_bits_fflags_bits_uop_prs1_0; // @[execution-unit.scala:204:7] wire [6:0] io_ll_fresp_bits_fflags_bits_uop_prs2_0; // @[execution-unit.scala:204:7] wire [6:0] io_ll_fresp_bits_fflags_bits_uop_prs3_0; // @[execution-unit.scala:204:7] wire [4:0] io_ll_fresp_bits_fflags_bits_uop_ppred_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_prs1_busy_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_prs2_busy_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_prs3_busy_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_ppred_busy_0; // @[execution-unit.scala:204:7] wire [6:0] io_ll_fresp_bits_fflags_bits_uop_stale_pdst_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_exception_0; // @[execution-unit.scala:204:7] wire [63:0] io_ll_fresp_bits_fflags_bits_uop_exc_cause_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_bypassable_0; // @[execution-unit.scala:204:7] wire [4:0] io_ll_fresp_bits_fflags_bits_uop_mem_cmd_0; // @[execution-unit.scala:204:7] wire [1:0] io_ll_fresp_bits_fflags_bits_uop_mem_size_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_mem_signed_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_is_fence_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_is_fencei_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_is_amo_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_uses_ldq_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_uses_stq_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_is_sys_pc2epc_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_is_unique_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_flush_on_commit_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_ldst_is_rs1_0; // @[execution-unit.scala:204:7] wire [5:0] io_ll_fresp_bits_fflags_bits_uop_ldst_0; // @[execution-unit.scala:204:7] wire [5:0] io_ll_fresp_bits_fflags_bits_uop_lrs1_0; // @[execution-unit.scala:204:7] wire 
[5:0] io_ll_fresp_bits_fflags_bits_uop_lrs2_0; // @[execution-unit.scala:204:7] wire [5:0] io_ll_fresp_bits_fflags_bits_uop_lrs3_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_ldst_val_0; // @[execution-unit.scala:204:7] wire [1:0] io_ll_fresp_bits_fflags_bits_uop_dst_rtype_0; // @[execution-unit.scala:204:7] wire [1:0] io_ll_fresp_bits_fflags_bits_uop_lrs1_rtype_0; // @[execution-unit.scala:204:7] wire [1:0] io_ll_fresp_bits_fflags_bits_uop_lrs2_rtype_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_frs3_en_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_fp_val_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_fp_single_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_xcpt_pf_if_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_xcpt_ae_if_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_xcpt_ma_if_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_bp_debug_if_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_bits_uop_bp_xcpt_if_0; // @[execution-unit.scala:204:7] wire [1:0] io_ll_fresp_bits_fflags_bits_uop_debug_fsrc_0; // @[execution-unit.scala:204:7] wire [1:0] io_ll_fresp_bits_fflags_bits_uop_debug_tsrc_0; // @[execution-unit.scala:204:7] wire [4:0] io_ll_fresp_bits_fflags_bits_flags_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_fflags_valid_0; // @[execution-unit.scala:204:7] wire [64:0] io_ll_fresp_bits_data_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_bits_predicated_0; // @[execution-unit.scala:204:7] wire io_ll_fresp_valid_0; // @[execution-unit.scala:204:7] wire [3:0] io_bypass_0_bits_uop_ctrl_br_type_0; // @[execution-unit.scala:204:7] wire [1:0] io_bypass_0_bits_uop_ctrl_op1_sel_0; // @[execution-unit.scala:204:7] wire [2:0] io_bypass_0_bits_uop_ctrl_op2_sel_0; // @[execution-unit.scala:204:7] wire [2:0] io_bypass_0_bits_uop_ctrl_imm_sel_0; // @[execution-unit.scala:204:7] wire [4:0] io_bypass_0_bits_uop_ctrl_op_fcn_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_ctrl_fcn_dw_0; // @[execution-unit.scala:204:7] wire [2:0] io_bypass_0_bits_uop_ctrl_csr_cmd_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_ctrl_is_load_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_ctrl_is_sta_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_ctrl_is_std_0; // @[execution-unit.scala:204:7] wire [6:0] io_bypass_0_bits_uop_uopc_0; // @[execution-unit.scala:204:7] wire [31:0] io_bypass_0_bits_uop_inst_0; // @[execution-unit.scala:204:7] wire [31:0] io_bypass_0_bits_uop_debug_inst_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_is_rvc_0; // @[execution-unit.scala:204:7] wire [39:0] io_bypass_0_bits_uop_debug_pc_0; // @[execution-unit.scala:204:7] wire [2:0] io_bypass_0_bits_uop_iq_type_0; // @[execution-unit.scala:204:7] wire [9:0] io_bypass_0_bits_uop_fu_code_0; // @[execution-unit.scala:204:7] wire [1:0] io_bypass_0_bits_uop_iw_state_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_iw_p1_poisoned_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_iw_p2_poisoned_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_is_br_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_is_jalr_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_is_jal_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_is_sfb_0; // @[execution-unit.scala:204:7] wire [15:0] 
io_bypass_0_bits_uop_br_mask_0; // @[execution-unit.scala:204:7] wire [3:0] io_bypass_0_bits_uop_br_tag_0; // @[execution-unit.scala:204:7] wire [4:0] io_bypass_0_bits_uop_ftq_idx_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_edge_inst_0; // @[execution-unit.scala:204:7] wire [5:0] io_bypass_0_bits_uop_pc_lob_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_taken_0; // @[execution-unit.scala:204:7] wire [19:0] io_bypass_0_bits_uop_imm_packed_0; // @[execution-unit.scala:204:7] wire [11:0] io_bypass_0_bits_uop_csr_addr_0; // @[execution-unit.scala:204:7] wire [6:0] io_bypass_0_bits_uop_rob_idx_0; // @[execution-unit.scala:204:7] wire [4:0] io_bypass_0_bits_uop_ldq_idx_0; // @[execution-unit.scala:204:7] wire [4:0] io_bypass_0_bits_uop_stq_idx_0; // @[execution-unit.scala:204:7] wire [1:0] io_bypass_0_bits_uop_rxq_idx_0; // @[execution-unit.scala:204:7] wire [6:0] io_bypass_0_bits_uop_pdst_0; // @[execution-unit.scala:204:7] wire [6:0] io_bypass_0_bits_uop_prs1_0; // @[execution-unit.scala:204:7] wire [6:0] io_bypass_0_bits_uop_prs2_0; // @[execution-unit.scala:204:7] wire [6:0] io_bypass_0_bits_uop_prs3_0; // @[execution-unit.scala:204:7] wire [4:0] io_bypass_0_bits_uop_ppred_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_prs1_busy_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_prs2_busy_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_prs3_busy_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_ppred_busy_0; // @[execution-unit.scala:204:7] wire [6:0] io_bypass_0_bits_uop_stale_pdst_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_exception_0; // @[execution-unit.scala:204:7] wire [63:0] io_bypass_0_bits_uop_exc_cause_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_bypassable_0; // @[execution-unit.scala:204:7] wire [4:0] io_bypass_0_bits_uop_mem_cmd_0; // @[execution-unit.scala:204:7] wire [1:0] io_bypass_0_bits_uop_mem_size_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_mem_signed_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_is_fence_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_is_fencei_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_is_amo_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_uses_ldq_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_uses_stq_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_is_sys_pc2epc_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_is_unique_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_flush_on_commit_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_ldst_is_rs1_0; // @[execution-unit.scala:204:7] wire [5:0] io_bypass_0_bits_uop_ldst_0; // @[execution-unit.scala:204:7] wire [5:0] io_bypass_0_bits_uop_lrs1_0; // @[execution-unit.scala:204:7] wire [5:0] io_bypass_0_bits_uop_lrs2_0; // @[execution-unit.scala:204:7] wire [5:0] io_bypass_0_bits_uop_lrs3_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_ldst_val_0; // @[execution-unit.scala:204:7] wire [1:0] io_bypass_0_bits_uop_dst_rtype_0; // @[execution-unit.scala:204:7] wire [1:0] io_bypass_0_bits_uop_lrs1_rtype_0; // @[execution-unit.scala:204:7] wire [1:0] io_bypass_0_bits_uop_lrs2_rtype_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_frs3_en_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_fp_val_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_fp_single_0; // @[execution-unit.scala:204:7] wire 
io_bypass_0_bits_uop_xcpt_pf_if_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_xcpt_ae_if_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_xcpt_ma_if_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_bp_debug_if_0; // @[execution-unit.scala:204:7] wire io_bypass_0_bits_uop_bp_xcpt_if_0; // @[execution-unit.scala:204:7] wire [1:0] io_bypass_0_bits_uop_debug_fsrc_0; // @[execution-unit.scala:204:7] wire [1:0] io_bypass_0_bits_uop_debug_tsrc_0; // @[execution-unit.scala:204:7] wire [64:0] io_bypass_0_bits_data_0; // @[execution-unit.scala:204:7] wire io_bypass_0_valid_0; // @[execution-unit.scala:204:7] wire [3:0] io_brinfo_uop_ctrl_br_type_0; // @[execution-unit.scala:204:7] wire [1:0] io_brinfo_uop_ctrl_op1_sel_0; // @[execution-unit.scala:204:7] wire [2:0] io_brinfo_uop_ctrl_op2_sel_0; // @[execution-unit.scala:204:7] wire [2:0] io_brinfo_uop_ctrl_imm_sel_0; // @[execution-unit.scala:204:7] wire [4:0] io_brinfo_uop_ctrl_op_fcn_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_ctrl_fcn_dw_0; // @[execution-unit.scala:204:7] wire [2:0] io_brinfo_uop_ctrl_csr_cmd_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_ctrl_is_load_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_ctrl_is_sta_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_ctrl_is_std_0; // @[execution-unit.scala:204:7] wire [6:0] io_brinfo_uop_uopc_0; // @[execution-unit.scala:204:7] wire [31:0] io_brinfo_uop_inst_0; // @[execution-unit.scala:204:7] wire [31:0] io_brinfo_uop_debug_inst_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_is_rvc_0; // @[execution-unit.scala:204:7] wire [39:0] io_brinfo_uop_debug_pc_0; // @[execution-unit.scala:204:7] wire [2:0] io_brinfo_uop_iq_type_0; // @[execution-unit.scala:204:7] wire [9:0] io_brinfo_uop_fu_code_0; // @[execution-unit.scala:204:7] wire [1:0] io_brinfo_uop_iw_state_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_iw_p1_poisoned_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_iw_p2_poisoned_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_is_br_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_is_jalr_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_is_jal_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_is_sfb_0; // @[execution-unit.scala:204:7] wire [15:0] io_brinfo_uop_br_mask_0; // @[execution-unit.scala:204:7] wire [3:0] io_brinfo_uop_br_tag_0; // @[execution-unit.scala:204:7] wire [4:0] io_brinfo_uop_ftq_idx_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_edge_inst_0; // @[execution-unit.scala:204:7] wire [5:0] io_brinfo_uop_pc_lob_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_taken_0; // @[execution-unit.scala:204:7] wire [19:0] io_brinfo_uop_imm_packed_0; // @[execution-unit.scala:204:7] wire [11:0] io_brinfo_uop_csr_addr_0; // @[execution-unit.scala:204:7] wire [6:0] io_brinfo_uop_rob_idx_0; // @[execution-unit.scala:204:7] wire [4:0] io_brinfo_uop_ldq_idx_0; // @[execution-unit.scala:204:7] wire [4:0] io_brinfo_uop_stq_idx_0; // @[execution-unit.scala:204:7] wire [1:0] io_brinfo_uop_rxq_idx_0; // @[execution-unit.scala:204:7] wire [6:0] io_brinfo_uop_pdst_0; // @[execution-unit.scala:204:7] wire [6:0] io_brinfo_uop_prs1_0; // @[execution-unit.scala:204:7] wire [6:0] io_brinfo_uop_prs2_0; // @[execution-unit.scala:204:7] wire [6:0] io_brinfo_uop_prs3_0; // @[execution-unit.scala:204:7] wire [4:0] io_brinfo_uop_ppred_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_prs1_busy_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_prs2_busy_0; // 
@[execution-unit.scala:204:7] wire io_brinfo_uop_prs3_busy_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_ppred_busy_0; // @[execution-unit.scala:204:7] wire [6:0] io_brinfo_uop_stale_pdst_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_exception_0; // @[execution-unit.scala:204:7] wire [63:0] io_brinfo_uop_exc_cause_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_bypassable_0; // @[execution-unit.scala:204:7] wire [4:0] io_brinfo_uop_mem_cmd_0; // @[execution-unit.scala:204:7] wire [1:0] io_brinfo_uop_mem_size_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_mem_signed_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_is_fence_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_is_fencei_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_is_amo_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_uses_ldq_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_uses_stq_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_is_sys_pc2epc_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_is_unique_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_flush_on_commit_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_ldst_is_rs1_0; // @[execution-unit.scala:204:7] wire [5:0] io_brinfo_uop_ldst_0; // @[execution-unit.scala:204:7] wire [5:0] io_brinfo_uop_lrs1_0; // @[execution-unit.scala:204:7] wire [5:0] io_brinfo_uop_lrs2_0; // @[execution-unit.scala:204:7] wire [5:0] io_brinfo_uop_lrs3_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_ldst_val_0; // @[execution-unit.scala:204:7] wire [1:0] io_brinfo_uop_dst_rtype_0; // @[execution-unit.scala:204:7] wire [1:0] io_brinfo_uop_lrs1_rtype_0; // @[execution-unit.scala:204:7] wire [1:0] io_brinfo_uop_lrs2_rtype_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_frs3_en_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_fp_val_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_fp_single_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_xcpt_pf_if_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_xcpt_ae_if_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_xcpt_ma_if_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_bp_debug_if_0; // @[execution-unit.scala:204:7] wire io_brinfo_uop_bp_xcpt_if_0; // @[execution-unit.scala:204:7] wire [1:0] io_brinfo_uop_debug_fsrc_0; // @[execution-unit.scala:204:7] wire [1:0] io_brinfo_uop_debug_tsrc_0; // @[execution-unit.scala:204:7] wire io_brinfo_valid_0; // @[execution-unit.scala:204:7] wire io_brinfo_mispredict_0; // @[execution-unit.scala:204:7] wire io_brinfo_taken_0; // @[execution-unit.scala:204:7] wire [2:0] io_brinfo_cfi_type_0; // @[execution-unit.scala:204:7] wire [1:0] io_brinfo_pc_sel_0; // @[execution-unit.scala:204:7] wire [20:0] io_brinfo_target_offset_0; // @[execution-unit.scala:204:7] wire [9:0] io_fu_types_0; // @[execution-unit.scala:204:7] wire _ifpu_busy_T; // @[execution-unit.scala:358:18] wire ifpu_busy; // @[execution-unit.scala:254:27] wire _io_fu_types_T_11 = ~ifpu_busy; // @[execution-unit.scala:254:27, :265:22] wire _io_fu_types_T_12 = _io_fu_types_T_11; // @[execution-unit.scala:265:{22,33}] wire [9:0] _io_fu_types_T_13 = {1'h0, _io_fu_types_T_12, 8'h0}; // @[execution-unit.scala:265:{21,33}] wire [9:0] _io_fu_types_T_14 = _io_fu_types_T_13 | 10'h21; // @[execution-unit.scala:264:49, :265:21] assign _io_fu_types_T_16 = _io_fu_types_T_14; // @[execution-unit.scala:264:49, :265:60] assign io_fu_types_0 = _io_fu_types_T_16; // @[execution-unit.scala:204:7, :265:60] assign io_bypass_0_bits_data_0 = 
{1'h0, _ALUUnit_io_bypass_0_bits_data}; // @[execution-unit.scala:204:7, :271:17, :293:15] assign _ifpu_busy_T = ~_queue_io_empty; // @[execution-unit.scala:347:23, :358:18] assign ifpu_busy = _ifpu_busy_T; // @[execution-unit.scala:254:27, :358:18]
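The tail end of this fragment reduces to a simple pattern: fu_types advertises a fixed capability constant (10'h21) plus an int-to-FP bit that is masked off whenever ifpu_busy (driven by the response queue being non-empty) is asserted. A minimal Chisel sketch of that gating follows; the module and signal names are illustrative, not the actual BOOM source.

import chisel3._

// Hedged sketch of the fu_types gating seen above: bit 8 (the int-to-FP capability)
// is suppressed while the unit is busy; the low constant bits stay advertised.
class FuTypesGate extends Module {
  val io = IO(new Bundle {
    val queueEmpty = Input(Bool())
    val fuTypes    = Output(UInt(10.W))
  })
  val ifpuBusy = !io.queueEmpty                        // mirrors ifpu_busy = ~queue_io_empty
  val i2fBit   = Mux(ifpuBusy, 0.U(10.W), (1 << 8).U(10.W))
  io.fuTypes := i2fBit | "h21".U(10.W)                 // mirrors ... | 10'h21
}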
Generate the Verilog code corresponding to the following Chisel files. File AsyncResetReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ /** This black-boxes an Async Reset * (or Set) * Register. * * Because Chisel doesn't support * parameterized black boxes, * we unfortunately have to * instantiate a number of these. * * We also have to hard-code the set/ * reset behavior. * * Do not confuse an asynchronous * reset signal with an asynchronously * reset reg. You should still * properly synchronize your reset * deassertion. * * @param d Data input * @param q Data Output * @param clk Clock Input * @param rst Reset Input * @param en Write Enable Input * */ class AsyncResetReg(resetValue: Int = 0) extends RawModule { val io = IO(new Bundle { val d = Input(Bool()) val q = Output(Bool()) val en = Input(Bool()) val clk = Input(Clock()) val rst = Input(Reset()) }) val reg = withClockAndReset(io.clk, io.rst.asAsyncReset)(RegInit(resetValue.U(1.W))) when (io.en) { reg := io.d } io.q := reg } class SimpleRegIO(val w: Int) extends Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) } class AsyncResetRegVec(val w: Int, val init: BigInt) extends Module { override def desiredName = s"AsyncResetRegVec_w${w}_i${init}" val io = IO(new SimpleRegIO(w)) val reg = withReset(reset.asAsyncReset)(RegInit(init.U(w.W))) when (io.en) { reg := io.d } io.q := reg } object AsyncResetReg { // Create Single Registers def apply(d: Bool, clk: Clock, rst: Bool, init: Boolean, name: Option[String]): Bool = { val reg = Module(new AsyncResetReg(if (init) 1 else 0)) reg.io.d := d reg.io.clk := clk reg.io.rst := rst reg.io.en := true.B name.foreach(reg.suggestName(_)) reg.io.q } def apply(d: Bool, clk: Clock, rst: Bool): Bool = apply(d, clk, rst, false, None) def apply(d: Bool, clk: Clock, rst: Bool, name: String): Bool = apply(d, clk, rst, false, Some(name)) // Create Vectors of Registers def apply(updateData: UInt, resetData: BigInt, enable: Bool, name: Option[String] = None): UInt = { val w = updateData.getWidth max resetData.bitLength val reg = Module(new AsyncResetRegVec(w, resetData)) name.foreach(reg.suggestName(_)) reg.io.d := updateData reg.io.en := enable reg.io.q } def apply(updateData: UInt, resetData: BigInt, enable: Bool, name: String): UInt = apply(updateData, resetData, enable, Some(name)) def apply(updateData: UInt, resetData: BigInt): UInt = apply(updateData, resetData, enable = true.B) def apply(updateData: UInt, resetData: BigInt, name: String): UInt = apply(updateData, resetData, enable = true.B, Some(name)) def apply(updateData: UInt, enable: Bool): UInt = apply(updateData, resetData=BigInt(0), enable) def apply(updateData: UInt, enable: Bool, name: String): UInt = apply(updateData, resetData = BigInt(0), enable, Some(name)) def apply(updateData: UInt): UInt = apply(updateData, resetData = BigInt(0), enable = true.B) def apply(updateData: UInt, name:String): UInt = apply(updateData, resetData = BigInt(0), enable = true.B, Some(name)) }
module AsyncResetRegVec_w1_i0_6( // @[AsyncResetReg.scala:56:7]
  input  clock, // @[AsyncResetReg.scala:56:7]
  input  reset, // @[AsyncResetReg.scala:56:7]
  input  io_d, // @[AsyncResetReg.scala:59:14]
  output io_q, // @[AsyncResetReg.scala:59:14]
  input  io_en // @[AsyncResetReg.scala:59:14]
);

  wire io_d_0 = io_d; // @[AsyncResetReg.scala:56:7]
  wire io_en_0 = io_en; // @[AsyncResetReg.scala:56:7]
  wire _reg_T = reset; // @[AsyncResetReg.scala:61:29]
  wire io_q_0; // @[AsyncResetReg.scala:56:7]
  reg  reg_0; // @[AsyncResetReg.scala:61:50]
  assign io_q_0 = reg_0; // @[AsyncResetReg.scala:56:7, :61:50]
  always @(posedge clock or posedge _reg_T) begin // @[AsyncResetReg.scala:56:7, :61:29]
    if (_reg_T) // @[AsyncResetReg.scala:56:7, :61:29]
      reg_0 <= 1'h0; // @[AsyncResetReg.scala:61:50]
    else if (io_en_0) // @[AsyncResetReg.scala:56:7]
      reg_0 <= io_d_0; // @[AsyncResetReg.scala:56:7, :61:50]
  end // always @(posedge, posedge)
  assign io_q = io_q_0; // @[AsyncResetReg.scala:56:7]
endmodule
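For reference, here is a hedged Chisel usage sketch of the AsyncResetReg companion object defined above. The wrapper module and signal names are invented for illustration; the apply overload used is the (updateData, enable, name) one listed in the source.

import chisel3._
import freechips.rocketchip.util.AsyncResetReg

// Holds a one-bit flag in an asynchronously reset register of width 1,
// which elaborates to an AsyncResetRegVec_w1_i0-style module like the one above.
class AsyncHeldFlag extends Module {
  val io = IO(new Bundle {
    val dataIn = Input(Bool())
    val held   = Output(Bool())
  })
  io.held := AsyncResetReg(io.dataIn.asUInt, true.B, "held_flag").asBool
}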
Generate the Verilog code corresponding to the following Chisel files. File RecFNToRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
=============================================================================*/ package hardfloat import chisel3._ import consts._ class RecFNToRecFN( inExpWidth: Int, inSigWidth: Int, outExpWidth: Int, outSigWidth: Int) extends chisel3.RawModule { val io = IO(new Bundle { val in = Input(Bits((inExpWidth + inSigWidth + 1).W)) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((outExpWidth + outSigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val rawIn = rawFloatFromRecFN(inExpWidth, inSigWidth, io.in); if ((inExpWidth == outExpWidth) && (inSigWidth <= outSigWidth)) { //-------------------------------------------------------------------- //-------------------------------------------------------------------- io.out := io.in<<(outSigWidth - inSigWidth) io.exceptionFlags := isSigNaNRawFloat(rawIn) ## 0.U(4.W) } else { //-------------------------------------------------------------------- //-------------------------------------------------------------------- val roundAnyRawFNToRecFN = Module( new RoundAnyRawFNToRecFN( inExpWidth, inSigWidth, outExpWidth, outSigWidth, flRoundOpt_sigMSBitAlwaysZero )) roundAnyRawFNToRecFN.io.invalidExc := isSigNaNRawFloat(rawIn) roundAnyRawFNToRecFN.io.infiniteExc := false.B roundAnyRawFNToRecFN.io.in := rawIn roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess io.out := roundAnyRawFNToRecFN.io.out io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags } } File rawFloatFromRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
=============================================================================*/ package hardfloat import chisel3._ import chisel3.util._ /*---------------------------------------------------------------------------- | In the result, no more than one of 'isNaN', 'isInf', and 'isZero' will be | set. *----------------------------------------------------------------------------*/ object rawFloatFromRecFN { def apply(expWidth: Int, sigWidth: Int, in: Bits): RawFloat = { val exp = in(expWidth + sigWidth - 1, sigWidth - 1) val isZero = exp(expWidth, expWidth - 2) === 0.U val isSpecial = exp(expWidth, expWidth - 1) === 3.U val out = Wire(new RawFloat(expWidth, sigWidth)) out.isNaN := isSpecial && exp(expWidth - 2) out.isInf := isSpecial && ! exp(expWidth - 2) out.isZero := isZero out.sign := in(expWidth + sigWidth) out.sExp := exp.zext out.sig := 0.U(1.W) ## ! isZero ## in(sigWidth - 2, 0) out } }
module RecFNToRecFN_24( // @[RecFNToRecFN.scala:44:5] input [32:0] io_in, // @[RecFNToRecFN.scala:48:16] output [32:0] io_out // @[RecFNToRecFN.scala:48:16] ); wire [32:0] io_in_0 = io_in; // @[RecFNToRecFN.scala:44:5] wire io_detectTininess = 1'h1; // @[RecFNToRecFN.scala:44:5, :48:16] wire [2:0] io_roundingMode = 3'h0; // @[RecFNToRecFN.scala:44:5, :48:16] wire [32:0] _io_out_T = io_in_0; // @[RecFNToRecFN.scala:44:5, :64:35] wire [4:0] _io_exceptionFlags_T_3; // @[RecFNToRecFN.scala:65:54] wire [32:0] io_out_0; // @[RecFNToRecFN.scala:44:5] wire [4:0] io_exceptionFlags; // @[RecFNToRecFN.scala:44:5] wire [8:0] rawIn_exp = io_in_0[31:23]; // @[rawFloatFromRecFN.scala:51:21] wire [2:0] _rawIn_isZero_T = rawIn_exp[8:6]; // @[rawFloatFromRecFN.scala:51:21, :52:28] wire rawIn_isZero = _rawIn_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}] wire rawIn_isZero_0 = rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :55:23] wire [1:0] _rawIn_isSpecial_T = rawIn_exp[8:7]; // @[rawFloatFromRecFN.scala:51:21, :53:28] wire rawIn_isSpecial = &_rawIn_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}] wire _rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33] wire _rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33] wire _rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:59:25] wire [9:0] _rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27] wire [24:0] _rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44] wire rawIn_isNaN; // @[rawFloatFromRecFN.scala:55:23] wire rawIn_isInf; // @[rawFloatFromRecFN.scala:55:23] wire rawIn_sign; // @[rawFloatFromRecFN.scala:55:23] wire [9:0] rawIn_sExp; // @[rawFloatFromRecFN.scala:55:23] wire [24:0] rawIn_sig; // @[rawFloatFromRecFN.scala:55:23] wire _rawIn_out_isNaN_T = rawIn_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41] wire _rawIn_out_isInf_T = rawIn_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41] assign _rawIn_out_isNaN_T_1 = rawIn_isSpecial & _rawIn_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}] assign rawIn_isNaN = _rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33] wire _rawIn_out_isInf_T_1 = ~_rawIn_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}] assign _rawIn_out_isInf_T_2 = rawIn_isSpecial & _rawIn_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}] assign rawIn_isInf = _rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33] assign _rawIn_out_sign_T = io_in_0[32]; // @[rawFloatFromRecFN.scala:59:25] assign rawIn_sign = _rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25] assign _rawIn_out_sExp_T = {1'h0, rawIn_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27] assign rawIn_sExp = _rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27] wire _rawIn_out_sig_T = ~rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :61:35] wire [1:0] _rawIn_out_sig_T_1 = {1'h0, _rawIn_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}] wire [22:0] _rawIn_out_sig_T_2 = io_in_0[22:0]; // @[rawFloatFromRecFN.scala:61:49] assign _rawIn_out_sig_T_3 = {_rawIn_out_sig_T_1, _rawIn_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}] assign rawIn_sig = _rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44] assign io_out_0 = _io_out_T; // @[RecFNToRecFN.scala:44:5, :64:35] wire _io_exceptionFlags_T = rawIn_sig[22]; // @[rawFloatFromRecFN.scala:55:23] wire _io_exceptionFlags_T_1 = ~_io_exceptionFlags_T; // @[common.scala:82:{49,56}] wire _io_exceptionFlags_T_2 = rawIn_isNaN & _io_exceptionFlags_T_1; // @[rawFloatFromRecFN.scala:55:23] assign _io_exceptionFlags_T_3 = 
{_io_exceptionFlags_T_2, 4'h0}; // @[common.scala:82:46] assign io_exceptionFlags = _io_exceptionFlags_T_3; // @[RecFNToRecFN.scala:44:5, :65:54] assign io_out = io_out_0; // @[RecFNToRecFN.scala:44:5] endmodule
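Since RecFNToRecFN_24 above is a same-format pass-through (inExpWidth == outExpWidth and inSigWidth == outSigWidth), the more interesting use of this generator is the widening or narrowing case that instantiates RoundAnyRawFNToRecFN. Below is a hedged sketch of a single-to-double widening harness, assuming hardfloat's recFNFromFN/fNFromRecFN helpers and consts rounding constants; the harness and port names are illustrative.

import chisel3._
import hardfloat._

// Converts an IEEE binary32 value to binary64 through the recoded formats.
class SingleToDouble extends Module {
  val io = IO(new Bundle {
    val in  = Input(UInt(32.W))   // IEEE754 binary32
    val out = Output(UInt(64.W))  // IEEE754 binary64
  })
  val conv = Module(new RecFNToRecFN(8, 24, 11, 53))
  conv.io.in             := recFNFromFN(8, 24, io.in)
  conv.io.roundingMode   := consts.round_near_even
  conv.io.detectTininess := consts.tininess_afterRounding
  io.out := fNFromRecFN(11, 53, conv.io.out)
}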
Generate the Verilog code corresponding to the following Chisel files. File Breakpoint.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.rocket import chisel3._ import chisel3.util.{Cat} import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.tile.{CoreBundle, HasCoreParameters} import freechips.rocketchip.util._ class BPControl(implicit p: Parameters) extends CoreBundle()(p) { val ttype = UInt(4.W) val dmode = Bool() val maskmax = UInt(6.W) val reserved = UInt((xLen - (if (coreParams.useBPWatch) 26 else 24)).W) val action = UInt((if (coreParams.useBPWatch) 3 else 1).W) val chain = Bool() val zero = UInt(2.W) val tmatch = UInt(2.W) val m = Bool() val h = Bool() val s = Bool() val u = Bool() val x = Bool() val w = Bool() val r = Bool() def tType = 2 def maskMax = 4 def enabled(mstatus: MStatus) = !mstatus.debug && Cat(m, h, s, u)(mstatus.prv) } class TExtra(implicit p: Parameters) extends CoreBundle()(p) { def mvalueBits: Int = if (xLen == 32) coreParams.mcontextWidth min 6 else coreParams.mcontextWidth min 13 def svalueBits: Int = if (xLen == 32) coreParams.scontextWidth min 16 else coreParams.scontextWidth min 34 def mselectPos: Int = if (xLen == 32) 25 else 50 def mvaluePos : Int = mselectPos + 1 def sselectPos: Int = 0 def svaluePos : Int = 2 val mvalue = UInt(mvalueBits.W) val mselect = Bool() val pad2 = UInt((mselectPos - svalueBits - 2).W) val svalue = UInt(svalueBits.W) val pad1 = UInt(1.W) val sselect = Bool() } class BP(implicit p: Parameters) extends CoreBundle()(p) { val control = new BPControl val address = UInt(vaddrBits.W) val textra = new TExtra def contextMatch(mcontext: UInt, scontext: UInt) = (if (coreParams.mcontextWidth > 0) (!textra.mselect || (mcontext(textra.mvalueBits-1,0) === textra.mvalue)) else true.B) && (if (coreParams.scontextWidth > 0) (!textra.sselect || (scontext(textra.svalueBits-1,0) === textra.svalue)) else true.B) def mask(dummy: Int = 0) = (0 until control.maskMax-1).scanLeft(control.tmatch(0))((m, i) => m && address(i)).asUInt def pow2AddressMatch(x: UInt) = (~x | mask()) === (~address | mask()) def rangeAddressMatch(x: UInt) = (x >= address) ^ control.tmatch(0) def addressMatch(x: UInt) = Mux(control.tmatch(1), rangeAddressMatch(x), pow2AddressMatch(x)) } class BPWatch (val n: Int) extends Bundle() { val valid = Vec(n, Bool()) val rvalid = Vec(n, Bool()) val wvalid = Vec(n, Bool()) val ivalid = Vec(n, Bool()) val action = UInt(3.W) } class BreakpointUnit(n: Int)(implicit val p: Parameters) extends Module with HasCoreParameters { val io = IO(new Bundle { val status = Input(new MStatus()) val bp = Input(Vec(n, new BP)) val pc = Input(UInt(vaddrBits.W)) val ea = Input(UInt(vaddrBits.W)) val mcontext = Input(UInt(coreParams.mcontextWidth.W)) val scontext = Input(UInt(coreParams.scontextWidth.W)) val xcpt_if = Output(Bool()) val xcpt_ld = Output(Bool()) val xcpt_st = Output(Bool()) val debug_if = Output(Bool()) val debug_ld = Output(Bool()) val debug_st = Output(Bool()) val bpwatch = Output(Vec(n, new BPWatch(1))) }) io.xcpt_if := false.B io.xcpt_ld := false.B io.xcpt_st := false.B io.debug_if := false.B io.debug_ld := false.B io.debug_st := false.B (io.bpwatch zip io.bp).foldLeft((true.B, true.B, true.B)) { case ((ri, wi, xi), (bpw, bp)) => val en = bp.control.enabled(io.status) val cx = bp.contextMatch(io.mcontext, io.scontext) val r = en && bp.control.r && bp.addressMatch(io.ea) && cx val w = en && bp.control.w && bp.addressMatch(io.ea) && cx val x = en && bp.control.x && bp.addressMatch(io.pc) && cx val 
end = !bp.control.chain val action = bp.control.action bpw.action := action bpw.valid(0) := false.B bpw.rvalid(0) := false.B bpw.wvalid(0) := false.B bpw.ivalid(0) := false.B when (end && r && ri) { io.xcpt_ld := (action === 0.U); io.debug_ld := (action === 1.U); bpw.valid(0) := true.B; bpw.rvalid(0) := true.B } when (end && w && wi) { io.xcpt_st := (action === 0.U); io.debug_st := (action === 1.U); bpw.valid(0) := true.B; bpw.wvalid(0) := true.B } when (end && x && xi) { io.xcpt_if := (action === 0.U); io.debug_if := (action === 1.U); bpw.valid(0) := true.B; bpw.ivalid(0) := true.B } (end || r, end || w, end || x) } }
module BreakpointUnit( // @[Breakpoint.scala:79:7] input clock, // @[Breakpoint.scala:79:7] input reset, // @[Breakpoint.scala:79:7] input [32:0] io_ea // @[Breakpoint.scala:80:14] ); wire [32:0] io_ea_0 = io_ea; // @[Breakpoint.scala:79:7] wire [32:0] io_pc = 33'h0; // @[Breakpoint.scala:79:7, :80:14] wire [7:0] io_status_zero1 = 8'h0; // @[Breakpoint.scala:79:7, :80:14] wire [22:0] io_status_zero2 = 23'h0; // @[Breakpoint.scala:79:7, :80:14] wire [1:0] io_status_dprv = 2'h0; // @[Breakpoint.scala:79:7, :80:14] wire [1:0] io_status_prv = 2'h0; // @[Breakpoint.scala:79:7, :80:14] wire [1:0] io_status_sxl = 2'h0; // @[Breakpoint.scala:79:7, :80:14] wire [1:0] io_status_uxl = 2'h0; // @[Breakpoint.scala:79:7, :80:14] wire [1:0] io_status_xs = 2'h0; // @[Breakpoint.scala:79:7, :80:14] wire [1:0] io_status_fs = 2'h0; // @[Breakpoint.scala:79:7, :80:14] wire [1:0] io_status_mpp = 2'h0; // @[Breakpoint.scala:79:7, :80:14] wire [1:0] io_status_vs = 2'h0; // @[Breakpoint.scala:79:7, :80:14] wire [31:0] io_status_isa = 32'h0; // @[Breakpoint.scala:79:7, :80:14] wire io_status_debug = 1'h0; // @[Breakpoint.scala:79:7] wire io_status_cease = 1'h0; // @[Breakpoint.scala:79:7] wire io_status_wfi = 1'h0; // @[Breakpoint.scala:79:7] wire io_status_dv = 1'h0; // @[Breakpoint.scala:79:7] wire io_status_v = 1'h0; // @[Breakpoint.scala:79:7] wire io_status_sd = 1'h0; // @[Breakpoint.scala:79:7] wire io_status_mpv = 1'h0; // @[Breakpoint.scala:79:7] wire io_status_gva = 1'h0; // @[Breakpoint.scala:79:7] wire io_status_mbe = 1'h0; // @[Breakpoint.scala:79:7] wire io_status_sbe = 1'h0; // @[Breakpoint.scala:79:7] wire io_status_sd_rv32 = 1'h0; // @[Breakpoint.scala:79:7] wire io_status_tsr = 1'h0; // @[Breakpoint.scala:79:7] wire io_status_tw = 1'h0; // @[Breakpoint.scala:79:7] wire io_status_tvm = 1'h0; // @[Breakpoint.scala:79:7] wire io_status_mxr = 1'h0; // @[Breakpoint.scala:79:7] wire io_status_sum = 1'h0; // @[Breakpoint.scala:79:7] wire io_status_mprv = 1'h0; // @[Breakpoint.scala:79:7] wire io_status_spp = 1'h0; // @[Breakpoint.scala:79:7] wire io_status_mpie = 1'h0; // @[Breakpoint.scala:79:7] wire io_status_ube = 1'h0; // @[Breakpoint.scala:79:7] wire io_status_spie = 1'h0; // @[Breakpoint.scala:79:7] wire io_status_upie = 1'h0; // @[Breakpoint.scala:79:7] wire io_status_mie = 1'h0; // @[Breakpoint.scala:79:7] wire io_status_hie = 1'h0; // @[Breakpoint.scala:79:7] wire io_status_sie = 1'h0; // @[Breakpoint.scala:79:7] wire io_status_uie = 1'h0; // @[Breakpoint.scala:79:7] wire io_xcpt_if = 1'h0; // @[Breakpoint.scala:79:7] wire io_xcpt_ld = 1'h0; // @[Breakpoint.scala:79:7] wire io_xcpt_st = 1'h0; // @[Breakpoint.scala:79:7] wire io_debug_if = 1'h0; // @[Breakpoint.scala:79:7] wire io_debug_ld = 1'h0; // @[Breakpoint.scala:79:7] wire io_debug_st = 1'h0; // @[Breakpoint.scala:79:7] endmodule
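The address-match logic in Breakpoint.scala above supports both an exact/NAPOT match (pow2AddressMatch, built from the mask() scanLeft over control.tmatch(0) and the low address bits) and a range match (rangeAddressMatch). A hedged, software-only Scala model of the NAPOT case is sketched below; it mirrors the hardware expressions for checking expected trigger behaviour, and the function and variable names are illustrative.

// Mirrors BP.mask() for maskMax = 4: the mask grows while tmatch(0) and the
// low address bits stay 1, describing a naturally aligned power-of-two region.
def napotMask(address: BigInt, tmatch0: Boolean, maskMax: Int = 4): BigInt = {
  val bits = (0 until maskMax - 1).scanLeft(tmatch0)((m, i) => m && address.testBit(i))
  bits.zipWithIndex.map { case (b, i) => if (b) BigInt(1) << i else BigInt(0) }.sum
}

// Mirrors BP.pow2AddressMatch: compare x and address with the masked-off low bits
// ignored (the hardware performs the same comparison at vaddrBits width).
def pow2AddressMatch(x: BigInt, address: BigInt, tmatch0: Boolean): Boolean = {
  val m = napotMask(address, tmatch0)
  (~x | m) == (~address | m)
}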
Generate the Verilog code corresponding to the following Chisel files. File RecFNToRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
=============================================================================*/ package hardfloat import chisel3._ import consts._ class RecFNToRecFN( inExpWidth: Int, inSigWidth: Int, outExpWidth: Int, outSigWidth: Int) extends chisel3.RawModule { val io = IO(new Bundle { val in = Input(Bits((inExpWidth + inSigWidth + 1).W)) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((outExpWidth + outSigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val rawIn = rawFloatFromRecFN(inExpWidth, inSigWidth, io.in); if ((inExpWidth == outExpWidth) && (inSigWidth <= outSigWidth)) { //-------------------------------------------------------------------- //-------------------------------------------------------------------- io.out := io.in<<(outSigWidth - inSigWidth) io.exceptionFlags := isSigNaNRawFloat(rawIn) ## 0.U(4.W) } else { //-------------------------------------------------------------------- //-------------------------------------------------------------------- val roundAnyRawFNToRecFN = Module( new RoundAnyRawFNToRecFN( inExpWidth, inSigWidth, outExpWidth, outSigWidth, flRoundOpt_sigMSBitAlwaysZero )) roundAnyRawFNToRecFN.io.invalidExc := isSigNaNRawFloat(rawIn) roundAnyRawFNToRecFN.io.infiniteExc := false.B roundAnyRawFNToRecFN.io.in := rawIn roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess io.out := roundAnyRawFNToRecFN.io.out io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags } } File rawFloatFromRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
=============================================================================*/ package hardfloat import chisel3._ import chisel3.util._ /*---------------------------------------------------------------------------- | In the result, no more than one of 'isNaN', 'isInf', and 'isZero' will be | set. *----------------------------------------------------------------------------*/ object rawFloatFromRecFN { def apply(expWidth: Int, sigWidth: Int, in: Bits): RawFloat = { val exp = in(expWidth + sigWidth - 1, sigWidth - 1) val isZero = exp(expWidth, expWidth - 2) === 0.U val isSpecial = exp(expWidth, expWidth - 1) === 3.U val out = Wire(new RawFloat(expWidth, sigWidth)) out.isNaN := isSpecial && exp(expWidth - 2) out.isInf := isSpecial && ! exp(expWidth - 2) out.isZero := isZero out.sign := in(expWidth + sigWidth) out.sExp := exp.zext out.sig := 0.U(1.W) ## ! isZero ## in(sigWidth - 2, 0) out } }
module RecFNToRecFN_236( // @[RecFNToRecFN.scala:44:5] input [32:0] io_in, // @[RecFNToRecFN.scala:48:16] output [32:0] io_out // @[RecFNToRecFN.scala:48:16] ); wire [32:0] io_in_0 = io_in; // @[RecFNToRecFN.scala:44:5] wire io_detectTininess = 1'h1; // @[RecFNToRecFN.scala:44:5, :48:16] wire [2:0] io_roundingMode = 3'h0; // @[RecFNToRecFN.scala:44:5, :48:16] wire [32:0] _io_out_T = io_in_0; // @[RecFNToRecFN.scala:44:5, :64:35] wire [4:0] _io_exceptionFlags_T_3; // @[RecFNToRecFN.scala:65:54] wire [32:0] io_out_0; // @[RecFNToRecFN.scala:44:5] wire [4:0] io_exceptionFlags; // @[RecFNToRecFN.scala:44:5] wire [8:0] rawIn_exp = io_in_0[31:23]; // @[rawFloatFromRecFN.scala:51:21] wire [2:0] _rawIn_isZero_T = rawIn_exp[8:6]; // @[rawFloatFromRecFN.scala:51:21, :52:28] wire rawIn_isZero = _rawIn_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}] wire rawIn_isZero_0 = rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :55:23] wire [1:0] _rawIn_isSpecial_T = rawIn_exp[8:7]; // @[rawFloatFromRecFN.scala:51:21, :53:28] wire rawIn_isSpecial = &_rawIn_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}] wire _rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33] wire _rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33] wire _rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:59:25] wire [9:0] _rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27] wire [24:0] _rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44] wire rawIn_isNaN; // @[rawFloatFromRecFN.scala:55:23] wire rawIn_isInf; // @[rawFloatFromRecFN.scala:55:23] wire rawIn_sign; // @[rawFloatFromRecFN.scala:55:23] wire [9:0] rawIn_sExp; // @[rawFloatFromRecFN.scala:55:23] wire [24:0] rawIn_sig; // @[rawFloatFromRecFN.scala:55:23] wire _rawIn_out_isNaN_T = rawIn_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41] wire _rawIn_out_isInf_T = rawIn_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41] assign _rawIn_out_isNaN_T_1 = rawIn_isSpecial & _rawIn_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}] assign rawIn_isNaN = _rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33] wire _rawIn_out_isInf_T_1 = ~_rawIn_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}] assign _rawIn_out_isInf_T_2 = rawIn_isSpecial & _rawIn_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}] assign rawIn_isInf = _rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33] assign _rawIn_out_sign_T = io_in_0[32]; // @[rawFloatFromRecFN.scala:59:25] assign rawIn_sign = _rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25] assign _rawIn_out_sExp_T = {1'h0, rawIn_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27] assign rawIn_sExp = _rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27] wire _rawIn_out_sig_T = ~rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :61:35] wire [1:0] _rawIn_out_sig_T_1 = {1'h0, _rawIn_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}] wire [22:0] _rawIn_out_sig_T_2 = io_in_0[22:0]; // @[rawFloatFromRecFN.scala:61:49] assign _rawIn_out_sig_T_3 = {_rawIn_out_sig_T_1, _rawIn_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}] assign rawIn_sig = _rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44] assign io_out_0 = _io_out_T; // @[RecFNToRecFN.scala:44:5, :64:35] wire _io_exceptionFlags_T = rawIn_sig[22]; // @[rawFloatFromRecFN.scala:55:23] wire _io_exceptionFlags_T_1 = ~_io_exceptionFlags_T; // @[common.scala:82:{49,56}] wire _io_exceptionFlags_T_2 = rawIn_isNaN & _io_exceptionFlags_T_1; // @[rawFloatFromRecFN.scala:55:23] assign _io_exceptionFlags_T_3 = 
{_io_exceptionFlags_T_2, 4'h0}; // @[common.scala:82:46] assign io_exceptionFlags = _io_exceptionFlags_T_3; // @[RecFNToRecFN.scala:44:5, :65:54] assign io_out = io_out_0; // @[RecFNToRecFN.scala:44:5] endmodule
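rawFloatFromRecFN, used by both RecFNToRecFN instances above, is also handy on its own for classifying a recoded value. A hedged Chisel sketch (the wrapper and port names are illustrative) that exposes the decoded special-case flags of a recoded binary32:

import chisel3._
import hardfloat.rawFloatFromRecFN

class RecFNClassify extends Module {
  val io = IO(new Bundle {
    val in     = Input(UInt(33.W))  // recoded binary32: expWidth = 8, sigWidth = 24
    val isNaN  = Output(Bool())
    val isInf  = Output(Bool())
    val isZero = Output(Bool())
  })
  // At most one of isNaN/isInf/isZero is set, per the comment in rawFloatFromRecFN.scala.
  val raw = rawFloatFromRecFN(8, 24, io.in)
  io.isNaN  := raw.isNaN
  io.isInf  := raw.isInf
  io.isZero := raw.isZero
}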
Generate the Verilog code corresponding to the following Chisel files. File Tile.scala: // See README.md for license details. package gemmini import chisel3._ import chisel3.util._ import Util._ /** * A Tile is a purely combinational 2D array of passThrough PEs. * a, b, s, and in_propag are broadcast across the entire array and are passed through to the Tile's outputs * @param width The data width of each PE in bits * @param rows Number of PEs on each row * @param columns Number of PEs on each column */ class Tile[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, tree_reduction: Boolean, max_simultaneous_matmuls: Int, val rows: Int, val columns: Int)(implicit ev: Arithmetic[T]) extends Module { val io = IO(new Bundle { val in_a = Input(Vec(rows, inputType)) val in_b = Input(Vec(columns, outputType)) // This is the output of the tile next to it val in_d = Input(Vec(columns, outputType)) val in_control = Input(Vec(columns, new PEControl(accType))) val in_id = Input(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W))) val in_last = Input(Vec(columns, Bool())) val out_a = Output(Vec(rows, inputType)) val out_c = Output(Vec(columns, outputType)) val out_b = Output(Vec(columns, outputType)) val out_control = Output(Vec(columns, new PEControl(accType))) val out_id = Output(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W))) val out_last = Output(Vec(columns, Bool())) val in_valid = Input(Vec(columns, Bool())) val out_valid = Output(Vec(columns, Bool())) val bad_dataflow = Output(Bool()) }) import ev._ val tile = Seq.fill(rows, columns)(Module(new PE(inputType, outputType, accType, df, max_simultaneous_matmuls))) val tileT = tile.transpose // TODO: abstract hori/vert broadcast, all these connections look the same // Broadcast 'a' horizontally across the Tile for (r <- 0 until rows) { tile(r).foldLeft(io.in_a(r)) { case (in_a, pe) => pe.io.in_a := in_a pe.io.out_a } } // Broadcast 'b' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_b(c)) { case (in_b, pe) => pe.io.in_b := (if (tree_reduction) in_b.zero else in_b) pe.io.out_b } } // Broadcast 'd' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_d(c)) { case (in_d, pe) => pe.io.in_d := in_d pe.io.out_c } } // Broadcast 'control' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_control(c)) { case (in_ctrl, pe) => pe.io.in_control := in_ctrl pe.io.out_control } } // Broadcast 'garbage' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_valid(c)) { case (v, pe) => pe.io.in_valid := v pe.io.out_valid } } // Broadcast 'id' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_id(c)) { case (id, pe) => pe.io.in_id := id pe.io.out_id } } // Broadcast 'last' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_last(c)) { case (last, pe) => pe.io.in_last := last pe.io.out_last } } // Drive the Tile's bottom IO for (c <- 0 until columns) { io.out_c(c) := tile(rows-1)(c).io.out_c io.out_control(c) := tile(rows-1)(c).io.out_control io.out_id(c) := tile(rows-1)(c).io.out_id io.out_last(c) := tile(rows-1)(c).io.out_last io.out_valid(c) := tile(rows-1)(c).io.out_valid io.out_b(c) := { if (tree_reduction) { val prods = tileT(c).map(_.io.out_b) accumulateTree(prods :+ io.in_b(c)) } else { tile(rows - 1)(c).io.out_b } } } io.bad_dataflow := tile.map(_.map(_.io.bad_dataflow).reduce(_||_)).reduce(_||_) // Drive the Tile's right IO for (r <- 0 until rows) { io.out_a(r) 
:= tile(r)(columns-1).io.out_a } }
module Tile_12( // @[Tile.scala:16:7] input clock, // @[Tile.scala:16:7] input reset, // @[Tile.scala:16:7] input [7:0] io_in_a_0, // @[Tile.scala:17:14] input [19:0] io_in_b_0, // @[Tile.scala:17:14] input [19:0] io_in_d_0, // @[Tile.scala:17:14] input io_in_control_0_dataflow, // @[Tile.scala:17:14] input io_in_control_0_propagate, // @[Tile.scala:17:14] input [4:0] io_in_control_0_shift, // @[Tile.scala:17:14] input [2:0] io_in_id_0, // @[Tile.scala:17:14] input io_in_last_0, // @[Tile.scala:17:14] output [7:0] io_out_a_0, // @[Tile.scala:17:14] output [19:0] io_out_c_0, // @[Tile.scala:17:14] output [19:0] io_out_b_0, // @[Tile.scala:17:14] output io_out_control_0_dataflow, // @[Tile.scala:17:14] output io_out_control_0_propagate, // @[Tile.scala:17:14] output [4:0] io_out_control_0_shift, // @[Tile.scala:17:14] output [2:0] io_out_id_0, // @[Tile.scala:17:14] output io_out_last_0, // @[Tile.scala:17:14] input io_in_valid_0, // @[Tile.scala:17:14] output io_out_valid_0 // @[Tile.scala:17:14] ); wire [7:0] io_in_a_0_0 = io_in_a_0; // @[Tile.scala:16:7] wire [19:0] io_in_b_0_0 = io_in_b_0; // @[Tile.scala:16:7] wire [19:0] io_in_d_0_0 = io_in_d_0; // @[Tile.scala:16:7] wire io_in_control_0_dataflow_0 = io_in_control_0_dataflow; // @[Tile.scala:16:7] wire io_in_control_0_propagate_0 = io_in_control_0_propagate; // @[Tile.scala:16:7] wire [4:0] io_in_control_0_shift_0 = io_in_control_0_shift; // @[Tile.scala:16:7] wire [2:0] io_in_id_0_0 = io_in_id_0; // @[Tile.scala:16:7] wire io_in_last_0_0 = io_in_last_0; // @[Tile.scala:16:7] wire io_in_valid_0_0 = io_in_valid_0; // @[Tile.scala:16:7] wire io_bad_dataflow = 1'h0; // @[Tile.scala:16:7, :17:14, :42:44] wire [7:0] io_out_a_0_0; // @[Tile.scala:16:7] wire [19:0] io_out_c_0_0; // @[Tile.scala:16:7] wire [19:0] io_out_b_0_0; // @[Tile.scala:16:7] wire io_out_control_0_dataflow_0; // @[Tile.scala:16:7] wire io_out_control_0_propagate_0; // @[Tile.scala:16:7] wire [4:0] io_out_control_0_shift_0; // @[Tile.scala:16:7] wire [2:0] io_out_id_0_0; // @[Tile.scala:16:7] wire io_out_last_0_0; // @[Tile.scala:16:7] wire io_out_valid_0_0; // @[Tile.scala:16:7] PE_268 tile_0_0 ( // @[Tile.scala:42:44] .clock (clock), .reset (reset), .io_in_a (io_in_a_0_0), // @[Tile.scala:16:7] .io_in_b (io_in_b_0_0), // @[Tile.scala:16:7] .io_in_d (io_in_d_0_0), // @[Tile.scala:16:7] .io_out_a (io_out_a_0_0), .io_out_b (io_out_b_0_0), .io_out_c (io_out_c_0_0), .io_in_control_dataflow (io_in_control_0_dataflow_0), // @[Tile.scala:16:7] .io_in_control_propagate (io_in_control_0_propagate_0), // @[Tile.scala:16:7] .io_in_control_shift (io_in_control_0_shift_0), // @[Tile.scala:16:7] .io_out_control_dataflow (io_out_control_0_dataflow_0), .io_out_control_propagate (io_out_control_0_propagate_0), .io_out_control_shift (io_out_control_0_shift_0), .io_in_id (io_in_id_0_0), // @[Tile.scala:16:7] .io_out_id (io_out_id_0_0), .io_in_last (io_in_last_0_0), // @[Tile.scala:16:7] .io_out_last (io_out_last_0_0), .io_in_valid (io_in_valid_0_0), // @[Tile.scala:16:7] .io_out_valid (io_out_valid_0_0) ); // @[Tile.scala:42:44] assign io_out_a_0 = io_out_a_0_0; // @[Tile.scala:16:7] assign io_out_c_0 = io_out_c_0_0; // @[Tile.scala:16:7] assign io_out_b_0 = io_out_b_0_0; // @[Tile.scala:16:7] assign io_out_control_0_dataflow = io_out_control_0_dataflow_0; // @[Tile.scala:16:7] assign io_out_control_0_propagate = io_out_control_0_propagate_0; // @[Tile.scala:16:7] assign io_out_control_0_shift = io_out_control_0_shift_0; // @[Tile.scala:16:7] assign io_out_id_0 = io_out_id_0_0; // 
@[Tile.scala:16:7] assign io_out_last_0 = io_out_last_0_0; // @[Tile.scala:16:7] assign io_out_valid_0 = io_out_valid_0_0; // @[Tile.scala:16:7] endmodule
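Tile_12 above is a 1x1 instance (rows = columns = 1) whose PE carries an 8-bit input type, a 20-bit output type, and a 3-bit id (log2Up of max_simultaneous_matmuls). A hedged Chisel sketch of how such a tile might be instantiated directly follows, assuming gemmini's implicit Arithmetic[SInt] evidence and Dataflow enumeration; the parameter values are guesses consistent with the generated port widths, not taken from any particular config.

import chisel3._
import gemmini._
import gemmini.Arithmetic._   // assumed location of the implicit Arithmetic[SInt]

class OneByOneTile extends Module {
  val tile = Module(new Tile(
    inputType  = SInt(8.W),
    outputType = SInt(20.W),
    accType    = SInt(32.W),
    df         = Dataflow.BOTH,
    tree_reduction = false,
    max_simultaneous_matmuls = 5,   // log2Up(5) = 3 id bits, matching io_in_id[2:0]
    rows = 1, columns = 1))
  // Pass the tile's ports straight through so the sketch elaborates on its own.
  val io = IO(chiselTypeOf(tile.io))
  io <> tile.io
}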
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
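// Illustrative aside (not part of the upstream Monitor.scala): the bookkeeping above packs,
// for every source ID, the value ((opcode << 1) | 1) into a 4-bit lane of `inflight_opcodes`
// (and the size likewise into a lane of `inflight_sizes`), so an all-zero lane unambiguously
// means "nothing in flight"; `a_opcode_lookup` / `a_size_lookup` undo the packing by shifting
// the lane down and dropping the low "valid" bit. A minimal pure-Scala sketch of the same
// arithmetic follows; `packOpcodeLane` and `lookupOpcodeLane` are made-up names used only
// for this illustration.
def packOpcodeLane(opcode: Int, source: Int, laneBits: Int = 4): BigInt =
  BigInt((opcode << 1) | 1) << (source * laneBits)
def lookupOpcodeLane(lanes: BigInt, source: Int, laneBits: Int = 4): Int =
  (((lanes >> (source * laneBits)) & ((1 << laneBits) - 1)) >> 1).toInt
// Example: recording a Get (opcode 4) from source 3 sets lane value 0x9 at bit 12, and the
// lookup recovers opcode 4: lookupOpcodeLane(packOpcodeLane(4, 3), 3) == 4.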
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Bundles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import freechips.rocketchip.util._ import scala.collection.immutable.ListMap import chisel3.util.Decoupled import chisel3.util.DecoupledIO import chisel3.reflect.DataMirror abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle // common combos in lazy policy: // Put + Acquire // Release + AccessAck object TLMessages { // A B C D E def PutFullData = 0.U // . . => AccessAck def PutPartialData = 1.U // . . => AccessAck def ArithmeticData = 2.U // . . => AccessAckData def LogicalData = 3.U // . . => AccessAckData def Get = 4.U // . . => AccessAckData def Hint = 5.U // . . => HintAck def AcquireBlock = 6.U // . => Grant[Data] def AcquirePerm = 7.U // . => Grant[Data] def Probe = 6.U // . => ProbeAck[Data] def AccessAck = 0.U // . . def AccessAckData = 1.U // . . def HintAck = 2.U // . . def ProbeAck = 4.U // . def ProbeAckData = 5.U // . def Release = 6.U // . => ReleaseAck def ReleaseData = 7.U // . => ReleaseAck def Grant = 4.U // . => GrantAck def GrantData = 5.U // . => GrantAck def ReleaseAck = 6.U // . def GrantAck = 0.U // . def isA(x: UInt) = x <= AcquirePerm def isB(x: UInt) = x <= Probe def isC(x: UInt) = x <= ReleaseData def isD(x: UInt) = x <= ReleaseAck def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant) def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck) def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("AcquireBlock",TLPermissions.PermMsgGrow), ("AcquirePerm",TLPermissions.PermMsgGrow)) def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("Probe",TLPermissions.PermMsgCap)) def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("ProbeAck",TLPermissions.PermMsgReport), ("ProbeAckData",TLPermissions.PermMsgReport), ("Release",TLPermissions.PermMsgReport), ("ReleaseData",TLPermissions.PermMsgReport)) def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("Grant",TLPermissions.PermMsgCap), ("GrantData",TLPermissions.PermMsgCap), ("ReleaseAck",TLPermissions.PermMsgReserved)) } /** * The three primary TileLink permissions are: * (T)runk: the agent is (or is on inwards path to) the global point of 
serialization. * (B)ranch: the agent is on an outwards path to * (N)one: * These permissions are permuted by transfer operations in various ways. * Operations can cap permissions, request for them to be grown or shrunk, * or for a report on their current status. */ object TLPermissions { val aWidth = 2 val bdWidth = 2 val cWidth = 3 // Cap types (Grant = new permissions, Probe = permisions <= target) def toT = 0.U(bdWidth.W) def toB = 1.U(bdWidth.W) def toN = 2.U(bdWidth.W) def isCap(x: UInt) = x <= toN // Grow types (Acquire = permissions >= target) def NtoB = 0.U(aWidth.W) def NtoT = 1.U(aWidth.W) def BtoT = 2.U(aWidth.W) def isGrow(x: UInt) = x <= BtoT // Shrink types (ProbeAck, Release) def TtoB = 0.U(cWidth.W) def TtoN = 1.U(cWidth.W) def BtoN = 2.U(cWidth.W) def isShrink(x: UInt) = x <= BtoN // Report types (ProbeAck, Release) def TtoT = 3.U(cWidth.W) def BtoB = 4.U(cWidth.W) def NtoN = 5.U(cWidth.W) def isReport(x: UInt) = x <= NtoN def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT") def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN") def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TotT", "Report BtoB", "Report NtoN") def PermMsgReserved:Seq[String] = Seq("Reserved") } object TLAtomics { val width = 3 // Arithmetic types def MIN = 0.U(width.W) def MAX = 1.U(width.W) def MINU = 2.U(width.W) def MAXU = 3.U(width.W) def ADD = 4.U(width.W) def isArithmetic(x: UInt) = x <= ADD // Logical types def XOR = 0.U(width.W) def OR = 1.U(width.W) def AND = 2.U(width.W) def SWAP = 3.U(width.W) def isLogical(x: UInt) = x <= SWAP def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD") def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP") } object TLHints { val width = 1 def PREFETCH_READ = 0.U(width.W) def PREFETCH_WRITE = 1.U(width.W) def isHints(x: UInt) = x <= PREFETCH_WRITE def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite") } sealed trait TLChannel extends TLBundleBase { val channelName: String } sealed trait TLDataChannel extends TLChannel sealed trait TLAddrChannel extends TLDataChannel final class TLBundleA(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleA_${params.shortName}" val channelName = "'A' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleB(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleB_${params.shortName}" val channelName = "'B' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val address = UInt(params.addressBits.W) // from // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleC(params: TLBundleParameters) extends TLBundleBase(params) with 
TLAddrChannel { override def typeName = s"TLBundleC_${params.shortName}" val channelName = "'C' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.cWidth.W) // shrink or report perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleD(params: TLBundleParameters) extends TLBundleBase(params) with TLDataChannel { override def typeName = s"TLBundleD_${params.shortName}" val channelName = "'D' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val sink = UInt(params.sinkBits.W) // from val denied = Bool() // implies corrupt iff *Data val user = BundleMap(params.responseFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleE(params: TLBundleParameters) extends TLBundleBase(params) with TLChannel { override def typeName = s"TLBundleE_${params.shortName}" val channelName = "'E' channel" val sink = UInt(params.sinkBits.W) // to } class TLBundle(val params: TLBundleParameters) extends Record { // Emulate a Bundle with elements abcde or ad depending on params.hasBCE private val optA = Some (Decoupled(new TLBundleA(params))) private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params)))) private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params))) private val optD = Some (Flipped(Decoupled(new TLBundleD(params)))) private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params))) def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params))))) def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params))))) def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params))))) def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params))))) def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params))))) val elements = if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a) else ListMap("d" -> d, "a" -> a) def tieoff(): Unit = { DataMirror.specifiedDirectionOf(a.ready) match { case SpecifiedDirection.Input => a.ready := false.B c.ready := false.B e.ready := false.B b.valid := false.B d.valid := false.B case SpecifiedDirection.Output => a.valid := false.B c.valid := false.B e.valid := false.B b.ready := false.B d.ready := false.B case _ => } } } object TLBundle { def apply(params: TLBundleParameters) = new TLBundle(params) } class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params) { val a = new AsyncBundle(new TLBundleA(params.base), params.async) val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async)) val c = new AsyncBundle(new TLBundleC(params.base), params.async) val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async)) val e = new AsyncBundle(new 
TLBundleE(params.base), params.async) } class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = RationalIO(new TLBundleA(params)) val b = Flipped(RationalIO(new TLBundleB(params))) val c = RationalIO(new TLBundleC(params)) val d = Flipped(RationalIO(new TLBundleD(params))) val e = RationalIO(new TLBundleE(params)) } class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = CreditedIO(new TLBundleA(params)) val b = Flipped(CreditedIO(new TLBundleB(params))) val c = CreditedIO(new TLBundleC(params)) val d = Flipped(CreditedIO(new TLBundleD(params))) val e = CreditedIO(new TLBundleE(params)) } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // An potentially empty inclusive range of 2-powers 
[min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max, s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask are the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f decribes a device managing 0x1000-0x100f, 0x1100-0x110f, ... 
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => 
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := 
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
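Before the generated monitor below, here is a minimal, self-contained sketch (not part of the sources above) of the beat-counting scheme used by TLEdge.numBeats1 and firstlastHelper, which is what produces the a_first_*/d_first_* counter signals in the emitted Verilog. The module name BeatCounterSketch and the default beatBytes/maxTransfer values are illustrative assumptions chosen only for this example.

// Illustrative sketch only; mirrors the spirit of TLEdge.numBeats1/firstlastHelper for one channel.
// BeatCounterSketch and its parameter defaults are assumptions, not part of the sources above.
import chisel3._
import chisel3.util._

class BeatCounterSketch(beatBytes: Int = 8, maxTransfer: Int = 64) extends Module {
  private val maxLgSize = log2Ceil(maxTransfer)
  val io = IO(new Bundle {
    val size    = Input(UInt(log2Ceil(maxLgSize + 1).W)) // lgSize carried by the message
    val hasData = Input(Bool())                          // only *Data opcodes are multibeat
    val fire    = Input(Bool())                          // a beat is accepted this cycle
    val first   = Output(Bool())
    val last    = Output(Bool())
    val done    = Output(Bool())
  })

  // numBeats1 = beats - 1: form 2^size - 1 (the value UIntToOH1 computes), then drop the
  // low bits covered by a single beatBytes-wide beat; non-data messages are single beat.
  val decode = (((1.U << io.size) - 1.U)(maxLgSize - 1, 0)) >> log2Ceil(beatBytes)
  val beats1 = Mux(io.hasData, decode, 0.U)

  // Down-counter as in firstlastHelper: load beats1 on the first accepted beat,
  // decrement on every later one; 'last' marks the final beat of the burst.
  val counter  = RegInit(0.U(log2Up(maxTransfer / beatBytes).W))
  val counter1 = counter - 1.U
  io.first := counter === 0.U
  io.last  := counter === 1.U || beats1 === 0.U
  io.done  := io.last && io.fire
  when (io.fire) { counter := Mux(io.first, beats1, counter1) }
}

In the monitor that follows, the same structure is elaborated per channel; with a 2-bit size field over a 64-bit data bus no message exceeds a single 8-byte beat, which is why wires such as a_first_beats1 and d_first_beats1 below are constant 1'h0.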
module TLMonitor_57( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [1:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [11:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [11:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [11:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input [63:0] io_in_d_bits_data // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7] wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7] wire [1:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7] wire [11:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7] wire [11:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7] wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7] wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7] wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7] wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7] wire [1:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7] wire [11:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7] wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7] wire io_in_d_bits_sink = 1'h0; // @[Monitor.scala:36:7] wire io_in_d_bits_denied = 1'h0; // @[Monitor.scala:36:7] wire io_in_d_bits_corrupt = 1'h0; // @[Monitor.scala:36:7] wire _source_ok_T = 1'h0; // @[Parameters.scala:54:10] wire _source_ok_T_6 = 1'h0; // @[Parameters.scala:54:10] wire sink_ok = 1'h0; // @[Monitor.scala:309:31] wire a_first_beats1_decode = 1'h0; // @[Edges.scala:220:59] wire a_first_beats1 = 1'h0; // @[Edges.scala:221:14] wire a_first_count = 1'h0; // @[Edges.scala:234:25] wire d_first_beats1_decode = 1'h0; // @[Edges.scala:220:59] wire d_first_beats1 = 1'h0; // @[Edges.scala:221:14] wire d_first_count = 1'h0; // @[Edges.scala:234:25] wire a_first_beats1_decode_1 = 1'h0; // @[Edges.scala:220:59] wire a_first_beats1_1 = 1'h0; // @[Edges.scala:221:14] wire a_first_count_1 = 1'h0; // @[Edges.scala:234:25] wire d_first_beats1_decode_1 = 1'h0; // @[Edges.scala:220:59] wire d_first_beats1_1 = 1'h0; // @[Edges.scala:221:14] wire d_first_count_1 = 1'h0; // @[Edges.scala:234:25] wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_1_ready 
= 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35] wire c_first_beats1_decode = 1'h0; // @[Edges.scala:220:59] wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36] wire c_first_beats1 = 1'h0; // @[Edges.scala:221:14] wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25] wire c_first_done = 1'h0; // @[Edges.scala:233:22] wire _c_first_count_T = 1'h0; // @[Edges.scala:234:27] wire c_first_count = 1'h0; // @[Edges.scala:234:25] wire _c_first_counter_T = 1'h0; // @[Edges.scala:236:21] wire d_first_beats1_decode_2 = 1'h0; // @[Edges.scala:220:59] wire d_first_beats1_2 = 1'h0; // @[Edges.scala:221:14] wire d_first_count_2 = 1'h0; // @[Edges.scala:234:25] wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire 
_c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47] wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95] wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71] wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44] wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36] wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51] wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40] wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55] wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88] wire _source_ok_T_1 = 1'h1; // @[Parameters.scala:54:32] wire _source_ok_T_2 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:54:67] wire _source_ok_T_7 = 1'h1; // @[Parameters.scala:54:32] wire _source_ok_T_8 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:54:67] wire _a_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire a_first_last = 1'h1; // @[Edges.scala:232:33] wire _d_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire d_first_last = 1'h1; // @[Edges.scala:232:33] wire _a_first_last_T_3 = 1'h1; // @[Edges.scala:232:43] wire a_first_last_1 = 1'h1; // @[Edges.scala:232:33] wire _d_first_last_T_3 = 1'h1; // @[Edges.scala:232:43] wire 
d_first_last_1 = 1'h1; // @[Edges.scala:232:33] wire c_first_counter1 = 1'h1; // @[Edges.scala:230:28] wire c_first = 1'h1; // @[Edges.scala:231:25] wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire c_first_last = 1'h1; // @[Edges.scala:232:33] wire _d_first_last_T_5 = 1'h1; // @[Edges.scala:232:43] wire d_first_last_2 = 1'h1; // @[Edges.scala:232:33] wire [1:0] _c_first_counter1_T = 2'h3; // @[Edges.scala:230:28] wire [1:0] io_in_d_bits_param = 2'h0; // @[Monitor.scala:36:7] wire [1:0] _c_first_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_first_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_first_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_first_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_set_wo_ready_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_set_wo_ready_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_opcodes_set_interm_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_opcodes_set_interm_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_sizes_set_interm_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_sizes_set_interm_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_opcodes_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_opcodes_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_sizes_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_sizes_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_probe_ack_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_probe_ack_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_probe_ack_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_probe_ack_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _same_cycle_resp_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _same_cycle_resp_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _same_cycle_resp_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _same_cycle_resp_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _same_cycle_resp_WIRE_4_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _same_cycle_resp_WIRE_5_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; 
// @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [11:0] _c_first_WIRE_bits_source = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_first_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_first_WIRE_1_bits_source = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _c_first_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _c_first_WIRE_2_bits_source = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_first_WIRE_2_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_first_WIRE_3_bits_source = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _c_first_WIRE_3_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _c_set_wo_ready_WIRE_bits_source = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_set_wo_ready_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_set_wo_ready_WIRE_1_bits_source = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _c_set_wo_ready_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _c_set_WIRE_bits_source = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_set_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_set_WIRE_1_bits_source = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _c_set_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _c_opcodes_set_interm_WIRE_bits_source = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_opcodes_set_interm_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_opcodes_set_interm_WIRE_1_bits_source = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _c_opcodes_set_interm_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _c_sizes_set_interm_WIRE_bits_source = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_sizes_set_interm_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_sizes_set_interm_WIRE_1_bits_source = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _c_sizes_set_interm_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _c_opcodes_set_WIRE_bits_source = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_opcodes_set_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_opcodes_set_WIRE_1_bits_source = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _c_opcodes_set_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _c_sizes_set_WIRE_bits_source = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_sizes_set_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_sizes_set_WIRE_1_bits_source = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _c_sizes_set_WIRE_1_bits_address = 12'h0; // 
@[Bundles.scala:265:61] wire [11:0] _c_probe_ack_WIRE_bits_source = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_probe_ack_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_probe_ack_WIRE_1_bits_source = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _c_probe_ack_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _c_probe_ack_WIRE_2_bits_source = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_probe_ack_WIRE_2_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_probe_ack_WIRE_3_bits_source = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _c_probe_ack_WIRE_3_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _same_cycle_resp_WIRE_bits_source = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _same_cycle_resp_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _same_cycle_resp_WIRE_1_bits_source = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _same_cycle_resp_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _same_cycle_resp_WIRE_2_bits_source = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _same_cycle_resp_WIRE_2_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _same_cycle_resp_WIRE_3_bits_source = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _same_cycle_resp_WIRE_3_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _same_cycle_resp_WIRE_4_bits_source = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _same_cycle_resp_WIRE_4_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _same_cycle_resp_WIRE_5_bits_source = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _same_cycle_resp_WIRE_5_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_beats1_decode_T_2 = 3'h0; // @[package.scala:243:46] wire [2:0] c_sizes_set_interm = 3'h0; // @[Monitor.scala:755:40] wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] 
_c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_T = 3'h0; // @[Monitor.scala:766:51] wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // 
@[Monitor.scala:724:57] wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [32769:0] _c_sizes_set_T_1 = 32770'h0; // @[Monitor.scala:768:52] wire [14:0] _c_opcodes_set_T = 15'h0; // @[Monitor.scala:767:79] wire [14:0] _c_sizes_set_T = 15'h0; // @[Monitor.scala:768:77] wire [32770:0] _c_opcodes_set_T_1 = 32771'h0; // @[Monitor.scala:767:54] wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] _c_sizes_set_interm_T_1 = 3'h1; // @[Monitor.scala:766:59] wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61] wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40] wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53] wire [4095:0] _c_set_wo_ready_T = 4096'h1; // @[OneHot.scala:58:35] wire [4095:0] _c_set_T = 4096'h1; // @[OneHot.scala:58:35] wire [8255:0] c_opcodes_set = 8256'h0; // @[Monitor.scala:740:34] wire [8255:0] c_sizes_set = 8256'h0; // @[Monitor.scala:741:34] wire [2063:0] c_set = 2064'h0; // @[Monitor.scala:738:34] wire [2063:0] c_set_wo_ready = 2064'h0; // @[Monitor.scala:739:34] wire [2:0] _c_first_beats1_decode_T_1 = 3'h7; // @[package.scala:243:76] wire [5:0] _c_first_beats1_decode_T = 6'h7; // @[package.scala:243:71] wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42] wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42] wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123] wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117] wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48] wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48] wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123] wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119] wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48] wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48] wire [11:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [11:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [11:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [11:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [11:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // 
@[Monitor.scala:36:7] wire [11:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [11:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [11:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [11:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [11:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [11:0] _source_ok_uncommonBits_T_1 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [11:0] source_ok_uncommonBits = _source_ok_uncommonBits_T; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_4 = source_ok_uncommonBits < 12'h810; // @[Parameters.scala:52:56, :57:20] wire _source_ok_T_5 = _source_ok_T_4; // @[Parameters.scala:56:48, :57:20] wire _source_ok_WIRE_0 = _source_ok_T_5; // @[Parameters.scala:1138:31] wire [5:0] _GEN = 6'h7 << io_in_a_bits_size_0; // @[package.scala:243:71] wire [5:0] _is_aligned_mask_T; // @[package.scala:243:71] assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71] wire [5:0] _a_first_beats1_decode_T; // @[package.scala:243:71] assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71] wire [5:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71] assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71] wire [2:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[2:0]; // @[package.scala:243:{71,76}] wire [2:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}] wire [11:0] _is_aligned_T = {9'h0, io_in_a_bits_address_0[2:0] & is_aligned_mask}; // @[package.scala:243:46] wire is_aligned = _is_aligned_T == 12'h0; // @[Edges.scala:21:{16,24}] wire [2:0] _mask_sizeOH_T = {1'h0, io_in_a_bits_size_0}; // @[Misc.scala:202:34] wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49] wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}] wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27] wire mask_sub_sub_sub_0_1 = &io_in_a_bits_size_0; // @[Misc.scala:206:21] wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26] wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26] wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}] wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}] wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26] wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26] wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}] wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire 
_mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26] wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26] wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20] wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}] wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}] wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}] wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}] wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}] wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10] wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10] wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10] wire [11:0] uncommonBits = _uncommonBits_T; // @[Parameters.scala:52:{29,56}] wire [11:0] uncommonBits_1 = _uncommonBits_T_1; // @[Parameters.scala:52:{29,56}] wire [11:0] uncommonBits_2 = _uncommonBits_T_2; 
// @[Parameters.scala:52:{29,56}] wire [11:0] uncommonBits_3 = _uncommonBits_T_3; // @[Parameters.scala:52:{29,56}] wire [11:0] uncommonBits_4 = _uncommonBits_T_4; // @[Parameters.scala:52:{29,56}] wire [11:0] uncommonBits_5 = _uncommonBits_T_5; // @[Parameters.scala:52:{29,56}] wire [11:0] uncommonBits_6 = _uncommonBits_T_6; // @[Parameters.scala:52:{29,56}] wire [11:0] uncommonBits_7 = _uncommonBits_T_7; // @[Parameters.scala:52:{29,56}] wire [11:0] uncommonBits_8 = _uncommonBits_T_8; // @[Parameters.scala:52:{29,56}] wire [11:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_10 = source_ok_uncommonBits_1 < 12'h810; // @[Parameters.scala:52:56, :57:20] wire _source_ok_T_11 = _source_ok_T_10; // @[Parameters.scala:56:48, :57:20] wire _source_ok_WIRE_1_0 = _source_ok_T_11; // @[Parameters.scala:1138:31] wire _T_665 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35] wire _a_first_T; // @[Decoupled.scala:51:35] assign _a_first_T = _T_665; // @[Decoupled.scala:51:35] wire _a_first_T_1; // @[Decoupled.scala:51:35] assign _a_first_T_1 = _T_665; // @[Decoupled.scala:51:35] wire a_first_done = _a_first_T; // @[Decoupled.scala:51:35] wire [2:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[2:0]; // @[package.scala:243:{71,76}] wire [2:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}] reg a_first_counter; // @[Edges.scala:229:27] wire _a_first_last_T = a_first_counter; // @[Edges.scala:229:27, :232:25] wire [1:0] _a_first_counter1_T = {1'h0, a_first_counter} - 2'h1; // @[Edges.scala:229:27, :230:28] wire a_first_counter1 = _a_first_counter1_T[0]; // @[Edges.scala:230:28] wire a_first = ~a_first_counter; // @[Edges.scala:229:27, :231:25] wire _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27] wire _a_first_counter_T = ~a_first & a_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [1:0] size; // @[Monitor.scala:389:22] reg [11:0] source; // @[Monitor.scala:390:22] reg [11:0] address; // @[Monitor.scala:391:22] wire _T_733 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35] wire _d_first_T; // @[Decoupled.scala:51:35] assign _d_first_T = _T_733; // @[Decoupled.scala:51:35] wire _d_first_T_1; // @[Decoupled.scala:51:35] assign _d_first_T_1 = _T_733; // @[Decoupled.scala:51:35] wire _d_first_T_2; // @[Decoupled.scala:51:35] assign _d_first_T_2 = _T_733; // @[Decoupled.scala:51:35] wire d_first_done = _d_first_T; // @[Decoupled.scala:51:35] wire [5:0] _GEN_0 = 6'h7 << io_in_d_bits_size_0; // @[package.scala:243:71] wire [5:0] _d_first_beats1_decode_T; // @[package.scala:243:71] assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71] wire [5:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71] assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71] wire [5:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71] assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71] wire [2:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[2:0]; // @[package.scala:243:{71,76}] wire [2:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // 
@[package.scala:243:{46,76}] wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] reg d_first_counter; // @[Edges.scala:229:27] wire _d_first_last_T = d_first_counter; // @[Edges.scala:229:27, :232:25] wire [1:0] _d_first_counter1_T = {1'h0, d_first_counter} - 2'h1; // @[Edges.scala:229:27, :230:28] wire d_first_counter1 = _d_first_counter1_T[0]; // @[Edges.scala:230:28] wire d_first = ~d_first_counter; // @[Edges.scala:229:27, :231:25] wire _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27] wire _d_first_counter_T = ~d_first & d_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] size_1; // @[Monitor.scala:540:22] reg [11:0] source_1; // @[Monitor.scala:541:22] reg [2063:0] inflight; // @[Monitor.scala:614:27] reg [8255:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [8255:0] inflight_sizes; // @[Monitor.scala:618:33] wire a_first_done_1 = _a_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[2:0]; // @[package.scala:243:{71,76}] wire [2:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}] reg a_first_counter_1; // @[Edges.scala:229:27] wire _a_first_last_T_2 = a_first_counter_1; // @[Edges.scala:229:27, :232:25] wire [1:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 2'h1; // @[Edges.scala:229:27, :230:28] wire a_first_counter1_1 = _a_first_counter1_T_1[0]; // @[Edges.scala:230:28] wire a_first_1 = ~a_first_counter_1; // @[Edges.scala:229:27, :231:25] wire _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire _a_first_counter_T_1 = ~a_first_1 & a_first_counter1_1; // @[Edges.scala:230:28, :231:25, :236:21] wire d_first_done_1 = _d_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[2:0]; // @[package.scala:243:{71,76}] wire [2:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] reg d_first_counter_1; // @[Edges.scala:229:27] wire _d_first_last_T_2 = d_first_counter_1; // @[Edges.scala:229:27, :232:25] wire [1:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 2'h1; // @[Edges.scala:229:27, :230:28] wire d_first_counter1_1 = _d_first_counter1_T_1[0]; // @[Edges.scala:230:28] wire d_first_1 = ~d_first_counter_1; // @[Edges.scala:229:27, :231:25] wire _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire _d_first_counter_T_1 = ~d_first_1 & d_first_counter1_1; // @[Edges.scala:230:28, :231:25, :236:21] wire [2063:0] a_set; // @[Monitor.scala:626:34] wire [2063:0] a_set_wo_ready; // @[Monitor.scala:627:34] wire [8255:0] a_opcodes_set; // @[Monitor.scala:630:33] wire [8255:0] a_sizes_set; // @[Monitor.scala:632:31] wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35] wire [14:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69] wire [14:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69] assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69] wire [14:0] _a_size_lookup_T; // @[Monitor.scala:641:65] assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65] wire [14:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101] assign 
_d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101] wire [14:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99] assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99] wire [14:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69] assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69] wire [14:0] _c_size_lookup_T; // @[Monitor.scala:750:67] assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67] wire [14:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101] assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101] wire [14:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99] assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99] wire [8255:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}] wire [8255:0] _a_opcode_lookup_T_6 = {8252'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}] wire [8255:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[8255:1]}; // @[Monitor.scala:637:{97,152}] assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}] wire [3:0] a_size_lookup; // @[Monitor.scala:639:33] wire [8255:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}] wire [8255:0] _a_size_lookup_T_6 = {8252'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}] wire [8255:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[8255:1]}; // @[Monitor.scala:641:{91,144}] assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}] wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40] wire [2:0] a_sizes_set_interm; // @[Monitor.scala:648:38] wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44] wire [4095:0] _GEN_2 = 4096'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35] wire [4095:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35] assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35] wire [4095:0] _a_set_T; // @[OneHot.scala:58:35] assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35] assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[2063:0] : 2064'h0; // @[OneHot.scala:58:35] wire _T_598 = _T_665 & a_first_1; // @[Decoupled.scala:51:35] assign a_set = _T_598 ? _a_set_T[2063:0] : 2064'h0; // @[OneHot.scala:58:35] wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53] wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}] assign a_opcodes_set_interm = _T_598 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}] wire [2:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51] wire [2:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[2:1], 1'h1}; // @[Monitor.scala:658:{51,59}] assign a_sizes_set_interm = _T_598 ? _a_sizes_set_interm_T_1 : 3'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}] wire [14:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79] wire [14:0] _a_opcodes_set_T; // @[Monitor.scala:659:79] assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79] wire [14:0] _a_sizes_set_T; // @[Monitor.scala:660:77] assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77] wire [32770:0] _a_opcodes_set_T_1 = {32767'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}] assign a_opcodes_set = _T_598 ? 
_a_opcodes_set_T_1[8255:0] : 8256'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}] wire [32769:0] _a_sizes_set_T_1 = {32767'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}] assign a_sizes_set = _T_598 ? _a_sizes_set_T_1[8255:0] : 8256'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}] wire [2063:0] d_clr; // @[Monitor.scala:664:34] wire [2063:0] d_clr_wo_ready; // @[Monitor.scala:665:34] wire [8255:0] d_opcodes_clr; // @[Monitor.scala:668:33] wire [8255:0] d_sizes_clr; // @[Monitor.scala:670:31] wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46] wire d_release_ack; // @[Monitor.scala:673:46] assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46] wire d_release_ack_1; // @[Monitor.scala:783:46] assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46] wire _T_644 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26] wire [4095:0] _GEN_5 = 4096'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35] wire [4095:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35] wire [4095:0] _d_clr_T; // @[OneHot.scala:58:35] assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35] wire [4095:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35] wire [4095:0] _d_clr_T_1; // @[OneHot.scala:58:35] assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35] assign d_clr_wo_ready = _T_644 & ~d_release_ack ? _d_clr_wo_ready_T[2063:0] : 2064'h0; // @[OneHot.scala:58:35] wire _T_613 = _T_733 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35] assign d_clr = _T_613 ? _d_clr_T[2063:0] : 2064'h0; // @[OneHot.scala:58:35] wire [32782:0] _d_opcodes_clr_T_5 = 32783'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}] assign d_opcodes_clr = _T_613 ? _d_opcodes_clr_T_5[8255:0] : 8256'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}] wire [32782:0] _d_sizes_clr_T_5 = 32783'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}] assign d_sizes_clr = _T_613 ? 
_d_sizes_clr_T_5[8255:0] : 8256'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}] wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}] wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113] wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}] wire [2063:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27] wire [2063:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38] wire [2063:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}] wire [8255:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43] wire [8255:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62] wire [8255:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}] wire [8255:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39] wire [8255:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56] wire [8255:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}] reg [31:0] watchdog; // @[Monitor.scala:709:27] wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26] wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26] reg [2063:0] inflight_1; // @[Monitor.scala:726:35] wire [2063:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35] reg [8255:0] inflight_opcodes_1; // @[Monitor.scala:727:35] wire [8255:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43] reg [8255:0] inflight_sizes_1; // @[Monitor.scala:728:35] wire [8255:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41] wire d_first_done_2 = _d_first_T_2; // @[Decoupled.scala:51:35] wire [2:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[2:0]; // @[package.scala:243:{71,76}] wire [2:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}] reg d_first_counter_2; // @[Edges.scala:229:27] wire _d_first_last_T_4 = d_first_counter_2; // @[Edges.scala:229:27, :232:25] wire [1:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 2'h1; // @[Edges.scala:229:27, :230:28] wire d_first_counter1_2 = _d_first_counter1_T_2[0]; // @[Edges.scala:230:28] wire d_first_2 = ~d_first_counter_2; // @[Edges.scala:229:27, :231:25] wire _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27] wire _d_first_counter_T_2 = ~d_first_2 & d_first_counter1_2; // @[Edges.scala:230:28, :231:25, :236:21] wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35] wire [3:0] c_size_lookup; // @[Monitor.scala:748:35] wire [8255:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}] wire [8255:0] _c_opcode_lookup_T_6 = {8252'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}] wire [8255:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[8255:1]}; // @[Monitor.scala:749:{97,152}] assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}] wire [8255:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}] wire [8255:0] _c_size_lookup_T_6 = {8252'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}] wire [8255:0] _c_size_lookup_T_7 = {1'h0, 
_c_size_lookup_T_6[8255:1]}; // @[Monitor.scala:750:{93,146}] assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}] wire [2063:0] d_clr_1; // @[Monitor.scala:774:34] wire [2063:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34] wire [8255:0] d_opcodes_clr_1; // @[Monitor.scala:776:34] wire [8255:0] d_sizes_clr_1; // @[Monitor.scala:777:34] wire _T_709 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26] assign d_clr_wo_ready_1 = _T_709 & d_release_ack_1 ? _d_clr_wo_ready_T_1[2063:0] : 2064'h0; // @[OneHot.scala:58:35] wire _T_691 = _T_733 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35] assign d_clr_1 = _T_691 ? _d_clr_T_1[2063:0] : 2064'h0; // @[OneHot.scala:58:35] wire [32782:0] _d_opcodes_clr_T_11 = 32783'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}] assign d_opcodes_clr_1 = _T_691 ? _d_opcodes_clr_T_11[8255:0] : 8256'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}] wire [32782:0] _d_sizes_clr_T_11 = 32783'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}] assign d_sizes_clr_1 = _T_691 ? _d_sizes_clr_T_11[8255:0] : 8256'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}] wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 12'h0; // @[Monitor.scala:36:7, :795:113] wire [2063:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46] wire [2063:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}] wire [8255:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62] wire [8255:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}] wire [8255:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58] wire [8255:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
Generate the Verilog code corresponding to the following Chisel files. File primitives.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ package hardfloat import chisel3._ import chisel3.util._ //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- object lowMask { def apply(in: UInt, topBound: BigInt, bottomBound: BigInt): UInt = { require(topBound != bottomBound) val numInVals = BigInt(1)<<in.getWidth if (topBound < bottomBound) { lowMask(~in, numInVals - 1 - topBound, numInVals - 1 - bottomBound) } else if (numInVals > 64 /* Empirical */) { // For simulation performance, we should avoid generating // exteremely wide shifters, so we divide and conquer. // Empirically, this does not impact synthesis QoR. 
val mid = numInVals / 2 val msb = in(in.getWidth - 1) val lsbs = in(in.getWidth - 2, 0) if (mid < topBound) { if (mid <= bottomBound) { Mux(msb, lowMask(lsbs, topBound - mid, bottomBound - mid), 0.U ) } else { Mux(msb, lowMask(lsbs, topBound - mid, 0) ## ((BigInt(1)<<(mid - bottomBound).toInt) - 1).U, lowMask(lsbs, mid, bottomBound) ) } } else { ~Mux(msb, 0.U, ~lowMask(lsbs, topBound, bottomBound)) } } else { val shift = (BigInt(-1)<<numInVals.toInt).S>>in Reverse( shift( (numInVals - 1 - bottomBound).toInt, (numInVals - topBound).toInt ) ) } } } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- object countLeadingZeros { def apply(in: UInt): UInt = PriorityEncoder(in.asBools.reverse) } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- object orReduceBy2 { def apply(in: UInt): UInt = { val reducedWidth = (in.getWidth + 1)>>1 val reducedVec = Wire(Vec(reducedWidth, Bool())) for (ix <- 0 until reducedWidth - 1) { reducedVec(ix) := in(ix * 2 + 1, ix * 2).orR } reducedVec(reducedWidth - 1) := in(in.getWidth - 1, (reducedWidth - 1) * 2).orR reducedVec.asUInt } } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- object orReduceBy4 { def apply(in: UInt): UInt = { val reducedWidth = (in.getWidth + 3)>>2 val reducedVec = Wire(Vec(reducedWidth, Bool())) for (ix <- 0 until reducedWidth - 1) { reducedVec(ix) := in(ix * 4 + 3, ix * 4).orR } reducedVec(reducedWidth - 1) := in(in.getWidth - 1, (reducedWidth - 1) * 4).orR reducedVec.asUInt } } File RoundAnyRawFNToRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ package hardfloat import chisel3._ import chisel3.util.Fill import consts._ //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class RoundAnyRawFNToRecFN( inExpWidth: Int, inSigWidth: Int, outExpWidth: Int, outSigWidth: Int, options: Int ) extends RawModule { override def desiredName = s"RoundAnyRawFNToRecFN_ie${inExpWidth}_is${inSigWidth}_oe${outExpWidth}_os${outSigWidth}" val io = IO(new Bundle { val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in' val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign' val in = Input(new RawFloat(inExpWidth, inSigWidth)) // (allowed exponent range has limits) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((outExpWidth + outSigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val sigMSBitAlwaysZero = ((options & flRoundOpt_sigMSBitAlwaysZero) != 0) val effectiveInSigWidth = if (sigMSBitAlwaysZero) inSigWidth else inSigWidth + 1 val neverUnderflows = ((options & (flRoundOpt_neverUnderflows | flRoundOpt_subnormsAlwaysExact) ) != 0) || (inExpWidth < outExpWidth) val neverOverflows = ((options & flRoundOpt_neverOverflows) != 0) || (inExpWidth < outExpWidth) val outNaNExp = BigInt(7)<<(outExpWidth - 2) val outInfExp = BigInt(6)<<(outExpWidth - 2) val outMaxFiniteExp = outInfExp - 1 val outMinNormExp = (BigInt(1)<<(outExpWidth - 1)) + 2 val outMinNonzeroExp = outMinNormExp - outSigWidth + 1 //------------------------------------------------------------------------ //------------------------------------------------------------------------ val roundingMode_near_even = (io.roundingMode === round_near_even) val roundingMode_minMag = (io.roundingMode === round_minMag) val roundingMode_min = (io.roundingMode === round_min) val roundingMode_max = (io.roundingMode === round_max) val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag) val roundingMode_odd = (io.roundingMode === round_odd) val roundMagUp = (roundingMode_min && io.in.sign) || (roundingMode_max && ! 
io.in.sign) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val sAdjustedExp = if (inExpWidth < outExpWidth) (io.in.sExp +& ((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S )(outExpWidth, 0).zext else if (inExpWidth == outExpWidth) io.in.sExp else io.in.sExp +& ((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S val adjustedSig = if (inSigWidth <= outSigWidth + 2) io.in.sig<<(outSigWidth - inSigWidth + 2) else (io.in.sig(inSigWidth, inSigWidth - outSigWidth - 1) ## io.in.sig(inSigWidth - outSigWidth - 2, 0).orR ) val doShiftSigDown1 = if (sigMSBitAlwaysZero) false.B else adjustedSig(outSigWidth + 2) val common_expOut = Wire(UInt((outExpWidth + 1).W)) val common_fractOut = Wire(UInt((outSigWidth - 1).W)) val common_overflow = Wire(Bool()) val common_totalUnderflow = Wire(Bool()) val common_underflow = Wire(Bool()) val common_inexact = Wire(Bool()) if ( neverOverflows && neverUnderflows && (effectiveInSigWidth <= outSigWidth) ) { //-------------------------------------------------------------------- //-------------------------------------------------------------------- common_expOut := sAdjustedExp(outExpWidth, 0) + doShiftSigDown1 common_fractOut := Mux(doShiftSigDown1, adjustedSig(outSigWidth + 1, 3), adjustedSig(outSigWidth, 2) ) common_overflow := false.B common_totalUnderflow := false.B common_underflow := false.B common_inexact := false.B } else { //-------------------------------------------------------------------- //-------------------------------------------------------------------- val roundMask = if (neverUnderflows) 0.U(outSigWidth.W) ## doShiftSigDown1 ## 3.U(2.W) else (lowMask( sAdjustedExp(outExpWidth, 0), outMinNormExp - outSigWidth - 1, outMinNormExp ) | doShiftSigDown1) ## 3.U(2.W) val shiftedRoundMask = 0.U(1.W) ## roundMask>>1 val roundPosMask = ~shiftedRoundMask & roundMask val roundPosBit = (adjustedSig & roundPosMask).orR val anyRoundExtra = (adjustedSig & shiftedRoundMask).orR val anyRound = roundPosBit || anyRoundExtra val roundIncr = ((roundingMode_near_even || roundingMode_near_maxMag) && roundPosBit) || (roundMagUp && anyRound) val roundedSig: Bits = Mux(roundIncr, (((adjustedSig | roundMask)>>2) +& 1.U) & ~Mux(roundingMode_near_even && roundPosBit && ! anyRoundExtra, roundMask>>1, 0.U((outSigWidth + 2).W) ), (adjustedSig & ~roundMask)>>2 | Mux(roundingMode_odd && anyRound, roundPosMask>>1, 0.U) ) //*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING //*** M.S. BIT OF SUBNORMAL SIG? 
val sRoundedExp = sAdjustedExp +& (roundedSig>>outSigWidth).asUInt.zext common_expOut := sRoundedExp(outExpWidth, 0) common_fractOut := Mux(doShiftSigDown1, roundedSig(outSigWidth - 1, 1), roundedSig(outSigWidth - 2, 0) ) common_overflow := (if (neverOverflows) false.B else //*** REWRITE BASED ON BEFORE-ROUNDING EXPONENT?: (sRoundedExp>>(outExpWidth - 1) >= 3.S)) common_totalUnderflow := (if (neverUnderflows) false.B else //*** WOULD BE GOOD ENOUGH TO USE EXPONENT BEFORE ROUNDING?: (sRoundedExp < outMinNonzeroExp.S)) val unboundedRange_roundPosBit = Mux(doShiftSigDown1, adjustedSig(2), adjustedSig(1)) val unboundedRange_anyRound = (doShiftSigDown1 && adjustedSig(2)) || adjustedSig(1, 0).orR val unboundedRange_roundIncr = ((roundingMode_near_even || roundingMode_near_maxMag) && unboundedRange_roundPosBit) || (roundMagUp && unboundedRange_anyRound) val roundCarry = Mux(doShiftSigDown1, roundedSig(outSigWidth + 1), roundedSig(outSigWidth) ) common_underflow := (if (neverUnderflows) false.B else common_totalUnderflow || //*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING //*** M.S. BIT OF SUBNORMAL SIG? (anyRound && ((sAdjustedExp>>outExpWidth) <= 0.S) && Mux(doShiftSigDown1, roundMask(3), roundMask(2)) && ! ((io.detectTininess === tininess_afterRounding) && ! Mux(doShiftSigDown1, roundMask(4), roundMask(3) ) && roundCarry && roundPosBit && unboundedRange_roundIncr))) common_inexact := common_totalUnderflow || anyRound } //------------------------------------------------------------------------ //------------------------------------------------------------------------ val isNaNOut = io.invalidExc || io.in.isNaN val notNaN_isSpecialInfOut = io.infiniteExc || io.in.isInf val commonCase = ! isNaNOut && ! notNaN_isSpecialInfOut && ! io.in.isZero val overflow = commonCase && common_overflow val underflow = commonCase && common_underflow val inexact = overflow || (commonCase && common_inexact) val overflow_roundMagUp = roundingMode_near_even || roundingMode_near_maxMag || roundMagUp val pegMinNonzeroMagOut = commonCase && common_totalUnderflow && (roundMagUp || roundingMode_odd) val pegMaxFiniteMagOut = overflow && ! 
overflow_roundMagUp val notNaN_isInfOut = notNaN_isSpecialInfOut || (overflow && overflow_roundMagUp) val signOut = Mux(isNaNOut, false.B, io.in.sign) val expOut = (common_expOut & ~Mux(io.in.isZero || common_totalUnderflow, (BigInt(7)<<(outExpWidth - 2)).U((outExpWidth + 1).W), 0.U ) & ~Mux(pegMinNonzeroMagOut, ~outMinNonzeroExp.U((outExpWidth + 1).W), 0.U ) & ~Mux(pegMaxFiniteMagOut, (BigInt(1)<<(outExpWidth - 1)).U((outExpWidth + 1).W), 0.U ) & ~Mux(notNaN_isInfOut, (BigInt(1)<<(outExpWidth - 2)).U((outExpWidth + 1).W), 0.U )) | Mux(pegMinNonzeroMagOut, outMinNonzeroExp.U((outExpWidth + 1).W), 0.U ) | Mux(pegMaxFiniteMagOut, outMaxFiniteExp.U((outExpWidth + 1).W), 0.U ) | Mux(notNaN_isInfOut, outInfExp.U((outExpWidth + 1).W), 0.U) | Mux(isNaNOut, outNaNExp.U((outExpWidth + 1).W), 0.U) val fractOut = Mux(isNaNOut || io.in.isZero || common_totalUnderflow, Mux(isNaNOut, (BigInt(1)<<(outSigWidth - 2)).U, 0.U), common_fractOut ) | Fill(outSigWidth - 1, pegMaxFiniteMagOut) io.out := signOut ## expOut ## fractOut io.exceptionFlags := io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class RoundRawFNToRecFN(expWidth: Int, sigWidth: Int, options: Int) extends RawModule { override def desiredName = s"RoundRawFNToRecFN_e${expWidth}_s${sigWidth}" val io = IO(new Bundle { val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in' val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign' val in = Input(new RawFloat(expWidth, sigWidth + 2)) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((expWidth + sigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) val roundAnyRawFNToRecFN = Module( new RoundAnyRawFNToRecFN( expWidth, sigWidth + 2, expWidth, sigWidth, options)) roundAnyRawFNToRecFN.io.invalidExc := io.invalidExc roundAnyRawFNToRecFN.io.infiniteExc := io.infiniteExc roundAnyRawFNToRecFN.io.in := io.in roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess io.out := roundAnyRawFNToRecFN.io.out io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags }
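As a quick sanity check on the parameterization instantiated below (outExpWidth = 8, outSigWidth = 24, i.e. the single-precision case), the special recoded-exponent constants defined above can be evaluated in plain Scala. The object name here is only for illustration, but the resulting values match the literals that appear in the generated Verilog (9'h1C0 for NaN, 9'h180 for infinity, 11'sh6B for the minimum nonzero exponent).

// Illustrative only: evaluates the special-exponent constants from
// RoundAnyRawFNToRecFN for outExpWidth = 8, outSigWidth = 24.
object RecFNConstantsExample extends App {
  val outExpWidth = 8
  val outSigWidth = 24
  val outNaNExp        = BigInt(7) << (outExpWidth - 2)       // 448 = 0x1C0
  val outInfExp        = BigInt(6) << (outExpWidth - 2)       // 384 = 0x180
  val outMaxFiniteExp  = outInfExp - 1                        // 383 = 0x17F
  val outMinNormExp    = (BigInt(1) << (outExpWidth - 1)) + 2 // 130
  val outMinNonzeroExp = outMinNormExp - outSigWidth + 1      // 107 = 0x6B
  println(s"NaN=0x${outNaNExp.toString(16)} inf=0x${outInfExp.toString(16)} " +
          s"maxFinite=0x${outMaxFiniteExp.toString(16)} minNorm=$outMinNormExp " +
          s"minNonzero=0x${outMinNonzeroExp.toString(16)}")
}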
module RoundAnyRawFNToRecFN_ie8_is26_oe8_os24_82( // @[RoundAnyRawFNToRecFN.scala:48:5] input io_invalidExc, // @[RoundAnyRawFNToRecFN.scala:58:16] input io_in_isNaN, // @[RoundAnyRawFNToRecFN.scala:58:16] input io_in_isInf, // @[RoundAnyRawFNToRecFN.scala:58:16] input io_in_isZero, // @[RoundAnyRawFNToRecFN.scala:58:16] input io_in_sign, // @[RoundAnyRawFNToRecFN.scala:58:16] input [9:0] io_in_sExp, // @[RoundAnyRawFNToRecFN.scala:58:16] input [26:0] io_in_sig, // @[RoundAnyRawFNToRecFN.scala:58:16] output [32:0] io_out, // @[RoundAnyRawFNToRecFN.scala:58:16] output [4:0] io_exceptionFlags // @[RoundAnyRawFNToRecFN.scala:58:16] ); wire io_invalidExc_0 = io_invalidExc; // @[RoundAnyRawFNToRecFN.scala:48:5] wire io_in_isNaN_0 = io_in_isNaN; // @[RoundAnyRawFNToRecFN.scala:48:5] wire io_in_isInf_0 = io_in_isInf; // @[RoundAnyRawFNToRecFN.scala:48:5] wire io_in_isZero_0 = io_in_isZero; // @[RoundAnyRawFNToRecFN.scala:48:5] wire io_in_sign_0 = io_in_sign; // @[RoundAnyRawFNToRecFN.scala:48:5] wire [9:0] io_in_sExp_0 = io_in_sExp; // @[RoundAnyRawFNToRecFN.scala:48:5] wire [26:0] io_in_sig_0 = io_in_sig; // @[RoundAnyRawFNToRecFN.scala:48:5] wire [8:0] _expOut_T_4 = 9'h194; // @[RoundAnyRawFNToRecFN.scala:258:19] wire [15:0] _roundMask_T_5 = 16'hFF; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_4 = 16'hFF00; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_10 = 16'hFF00; // @[primitives.scala:77:20] wire [11:0] _roundMask_T_13 = 12'hFF; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_14 = 16'hFF0; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_15 = 16'hF0F; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_20 = 16'hF0F0; // @[primitives.scala:77:20] wire [13:0] _roundMask_T_23 = 14'hF0F; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_24 = 16'h3C3C; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_25 = 16'h3333; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_30 = 16'hCCCC; // @[primitives.scala:77:20] wire [14:0] _roundMask_T_33 = 15'h3333; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_34 = 16'h6666; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_35 = 16'h5555; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_40 = 16'hAAAA; // @[primitives.scala:77:20] wire [25:0] _roundedSig_T_15 = 26'h0; // @[RoundAnyRawFNToRecFN.scala:181:24] wire [8:0] _expOut_T_6 = 9'h1FF; // @[RoundAnyRawFNToRecFN.scala:257:14, :261:14] wire [8:0] _expOut_T_9 = 9'h1FF; // @[RoundAnyRawFNToRecFN.scala:257:14, :261:14] wire [8:0] _expOut_T_5 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:257:18] wire [8:0] _expOut_T_8 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:261:18] wire [8:0] _expOut_T_14 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:269:16] wire [8:0] _expOut_T_16 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:273:16] wire [22:0] _fractOut_T_4 = 23'h0; // @[RoundAnyRawFNToRecFN.scala:284:13] wire io_detectTininess = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5] wire roundingMode_near_even = 1'h1; // @[RoundAnyRawFNToRecFN.scala:90:53] wire _roundIncr_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:169:38] wire _unboundedRange_roundIncr_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:207:38] wire _common_underflow_T_7 = 1'h1; // @[RoundAnyRawFNToRecFN.scala:222:49] wire _overflow_roundMagUp_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:243:32] wire overflow_roundMagUp = 1'h1; // @[RoundAnyRawFNToRecFN.scala:243:60] wire [2:0] io_roundingMode = 3'h0; // @[RoundAnyRawFNToRecFN.scala:48:5] wire io_infiniteExc = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5] wire roundingMode_minMag = 1'h0; // 
@[RoundAnyRawFNToRecFN.scala:91:53] wire roundingMode_min = 1'h0; // @[RoundAnyRawFNToRecFN.scala:92:53] wire roundingMode_max = 1'h0; // @[RoundAnyRawFNToRecFN.scala:93:53] wire roundingMode_near_maxMag = 1'h0; // @[RoundAnyRawFNToRecFN.scala:94:53] wire roundingMode_odd = 1'h0; // @[RoundAnyRawFNToRecFN.scala:95:53] wire _roundMagUp_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:27] wire _roundMagUp_T_2 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:63] wire roundMagUp = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:42] wire _roundIncr_T_2 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:171:29] wire _roundedSig_T_13 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:181:42] wire _unboundedRange_roundIncr_T_2 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:209:29] wire _pegMinNonzeroMagOut_T_1 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:60] wire pegMinNonzeroMagOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:45] wire _pegMaxFiniteMagOut_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:246:42] wire pegMaxFiniteMagOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:246:39] wire notNaN_isSpecialInfOut = io_in_isInf_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :236:49] wire [26:0] adjustedSig = io_in_sig_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :114:22] wire [32:0] _io_out_T_1; // @[RoundAnyRawFNToRecFN.scala:286:33] wire [4:0] _io_exceptionFlags_T_3; // @[RoundAnyRawFNToRecFN.scala:288:66] wire [32:0] io_out_0; // @[RoundAnyRawFNToRecFN.scala:48:5] wire [4:0] io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:48:5] wire _roundMagUp_T_1 = ~io_in_sign_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :98:66] wire doShiftSigDown1 = adjustedSig[26]; // @[RoundAnyRawFNToRecFN.scala:114:22, :120:57] wire [8:0] _common_expOut_T; // @[RoundAnyRawFNToRecFN.scala:187:37] wire [8:0] common_expOut; // @[RoundAnyRawFNToRecFN.scala:122:31] wire [22:0] _common_fractOut_T_2; // @[RoundAnyRawFNToRecFN.scala:189:16] wire [22:0] common_fractOut; // @[RoundAnyRawFNToRecFN.scala:123:31] wire _common_overflow_T_1; // @[RoundAnyRawFNToRecFN.scala:196:50] wire common_overflow; // @[RoundAnyRawFNToRecFN.scala:124:37] wire _common_totalUnderflow_T; // @[RoundAnyRawFNToRecFN.scala:200:31] wire common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:125:37] wire _common_underflow_T_18; // @[RoundAnyRawFNToRecFN.scala:217:40] wire common_underflow; // @[RoundAnyRawFNToRecFN.scala:126:37] wire _common_inexact_T; // @[RoundAnyRawFNToRecFN.scala:230:49] wire common_inexact; // @[RoundAnyRawFNToRecFN.scala:127:37] wire [8:0] _roundMask_T = io_in_sExp_0[8:0]; // @[RoundAnyRawFNToRecFN.scala:48:5, :156:37] wire [8:0] _roundMask_T_1 = ~_roundMask_T; // @[primitives.scala:52:21] wire roundMask_msb = _roundMask_T_1[8]; // @[primitives.scala:52:21, :58:25] wire [7:0] roundMask_lsbs = _roundMask_T_1[7:0]; // @[primitives.scala:52:21, :59:26] wire roundMask_msb_1 = roundMask_lsbs[7]; // @[primitives.scala:58:25, :59:26] wire [6:0] roundMask_lsbs_1 = roundMask_lsbs[6:0]; // @[primitives.scala:59:26] wire roundMask_msb_2 = roundMask_lsbs_1[6]; // @[primitives.scala:58:25, :59:26] wire roundMask_msb_3 = roundMask_lsbs_1[6]; // @[primitives.scala:58:25, :59:26] wire [5:0] roundMask_lsbs_2 = roundMask_lsbs_1[5:0]; // @[primitives.scala:59:26] wire [5:0] roundMask_lsbs_3 = roundMask_lsbs_1[5:0]; // @[primitives.scala:59:26] wire [64:0] roundMask_shift = $signed(65'sh10000000000000000 >>> roundMask_lsbs_2); // @[primitives.scala:59:26, :76:56] wire [21:0] _roundMask_T_2 = roundMask_shift[63:42]; // @[primitives.scala:76:56, :78:22] wire [15:0] _roundMask_T_3 = _roundMask_T_2[15:0]; // 
@[primitives.scala:77:20, :78:22] wire [7:0] _roundMask_T_6 = _roundMask_T_3[15:8]; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_7 = {8'h0, _roundMask_T_6}; // @[primitives.scala:77:20] wire [7:0] _roundMask_T_8 = _roundMask_T_3[7:0]; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_9 = {_roundMask_T_8, 8'h0}; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_11 = _roundMask_T_9 & 16'hFF00; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_12 = _roundMask_T_7 | _roundMask_T_11; // @[primitives.scala:77:20] wire [11:0] _roundMask_T_16 = _roundMask_T_12[15:4]; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_17 = {4'h0, _roundMask_T_16 & 12'hF0F}; // @[primitives.scala:77:20] wire [11:0] _roundMask_T_18 = _roundMask_T_12[11:0]; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_19 = {_roundMask_T_18, 4'h0}; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_21 = _roundMask_T_19 & 16'hF0F0; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_22 = _roundMask_T_17 | _roundMask_T_21; // @[primitives.scala:77:20] wire [13:0] _roundMask_T_26 = _roundMask_T_22[15:2]; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_27 = {2'h0, _roundMask_T_26 & 14'h3333}; // @[primitives.scala:77:20] wire [13:0] _roundMask_T_28 = _roundMask_T_22[13:0]; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_29 = {_roundMask_T_28, 2'h0}; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_31 = _roundMask_T_29 & 16'hCCCC; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_32 = _roundMask_T_27 | _roundMask_T_31; // @[primitives.scala:77:20] wire [14:0] _roundMask_T_36 = _roundMask_T_32[15:1]; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_37 = {1'h0, _roundMask_T_36 & 15'h5555}; // @[primitives.scala:77:20] wire [14:0] _roundMask_T_38 = _roundMask_T_32[14:0]; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_39 = {_roundMask_T_38, 1'h0}; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_41 = _roundMask_T_39 & 16'hAAAA; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_42 = _roundMask_T_37 | _roundMask_T_41; // @[primitives.scala:77:20] wire [5:0] _roundMask_T_43 = _roundMask_T_2[21:16]; // @[primitives.scala:77:20, :78:22] wire [3:0] _roundMask_T_44 = _roundMask_T_43[3:0]; // @[primitives.scala:77:20] wire [1:0] _roundMask_T_45 = _roundMask_T_44[1:0]; // @[primitives.scala:77:20] wire _roundMask_T_46 = _roundMask_T_45[0]; // @[primitives.scala:77:20] wire _roundMask_T_47 = _roundMask_T_45[1]; // @[primitives.scala:77:20] wire [1:0] _roundMask_T_48 = {_roundMask_T_46, _roundMask_T_47}; // @[primitives.scala:77:20] wire [1:0] _roundMask_T_49 = _roundMask_T_44[3:2]; // @[primitives.scala:77:20] wire _roundMask_T_50 = _roundMask_T_49[0]; // @[primitives.scala:77:20] wire _roundMask_T_51 = _roundMask_T_49[1]; // @[primitives.scala:77:20] wire [1:0] _roundMask_T_52 = {_roundMask_T_50, _roundMask_T_51}; // @[primitives.scala:77:20] wire [3:0] _roundMask_T_53 = {_roundMask_T_48, _roundMask_T_52}; // @[primitives.scala:77:20] wire [1:0] _roundMask_T_54 = _roundMask_T_43[5:4]; // @[primitives.scala:77:20] wire _roundMask_T_55 = _roundMask_T_54[0]; // @[primitives.scala:77:20] wire _roundMask_T_56 = _roundMask_T_54[1]; // @[primitives.scala:77:20] wire [1:0] _roundMask_T_57 = {_roundMask_T_55, _roundMask_T_56}; // @[primitives.scala:77:20] wire [5:0] _roundMask_T_58 = {_roundMask_T_53, _roundMask_T_57}; // @[primitives.scala:77:20] wire [21:0] _roundMask_T_59 = {_roundMask_T_42, _roundMask_T_58}; // @[primitives.scala:77:20] wire [21:0] _roundMask_T_60 = 
~_roundMask_T_59; // @[primitives.scala:73:32, :77:20] wire [21:0] _roundMask_T_61 = roundMask_msb_2 ? 22'h0 : _roundMask_T_60; // @[primitives.scala:58:25, :73:{21,32}] wire [21:0] _roundMask_T_62 = ~_roundMask_T_61; // @[primitives.scala:73:{17,21}] wire [24:0] _roundMask_T_63 = {_roundMask_T_62, 3'h7}; // @[primitives.scala:68:58, :73:17] wire [64:0] roundMask_shift_1 = $signed(65'sh10000000000000000 >>> roundMask_lsbs_3); // @[primitives.scala:59:26, :76:56] wire [2:0] _roundMask_T_64 = roundMask_shift_1[2:0]; // @[primitives.scala:76:56, :78:22] wire [1:0] _roundMask_T_65 = _roundMask_T_64[1:0]; // @[primitives.scala:77:20, :78:22] wire _roundMask_T_66 = _roundMask_T_65[0]; // @[primitives.scala:77:20] wire _roundMask_T_67 = _roundMask_T_65[1]; // @[primitives.scala:77:20] wire [1:0] _roundMask_T_68 = {_roundMask_T_66, _roundMask_T_67}; // @[primitives.scala:77:20] wire _roundMask_T_69 = _roundMask_T_64[2]; // @[primitives.scala:77:20, :78:22] wire [2:0] _roundMask_T_70 = {_roundMask_T_68, _roundMask_T_69}; // @[primitives.scala:77:20] wire [2:0] _roundMask_T_71 = roundMask_msb_3 ? _roundMask_T_70 : 3'h0; // @[primitives.scala:58:25, :62:24, :77:20] wire [24:0] _roundMask_T_72 = roundMask_msb_1 ? _roundMask_T_63 : {22'h0, _roundMask_T_71}; // @[primitives.scala:58:25, :62:24, :67:24, :68:58] wire [24:0] _roundMask_T_73 = roundMask_msb ? _roundMask_T_72 : 25'h0; // @[primitives.scala:58:25, :62:24, :67:24] wire [24:0] _roundMask_T_74 = {_roundMask_T_73[24:1], _roundMask_T_73[0] | doShiftSigDown1}; // @[primitives.scala:62:24] wire [26:0] roundMask = {_roundMask_T_74, 2'h3}; // @[RoundAnyRawFNToRecFN.scala:159:{23,42}] wire [27:0] _shiftedRoundMask_T = {1'h0, roundMask}; // @[RoundAnyRawFNToRecFN.scala:159:42, :162:41] wire [26:0] shiftedRoundMask = _shiftedRoundMask_T[27:1]; // @[RoundAnyRawFNToRecFN.scala:162:{41,53}] wire [26:0] _roundPosMask_T = ~shiftedRoundMask; // @[RoundAnyRawFNToRecFN.scala:162:53, :163:28] wire [26:0] roundPosMask = _roundPosMask_T & roundMask; // @[RoundAnyRawFNToRecFN.scala:159:42, :163:{28,46}] wire [26:0] _roundPosBit_T = adjustedSig & roundPosMask; // @[RoundAnyRawFNToRecFN.scala:114:22, :163:46, :164:40] wire roundPosBit = |_roundPosBit_T; // @[RoundAnyRawFNToRecFN.scala:164:{40,56}] wire _roundIncr_T_1 = roundPosBit; // @[RoundAnyRawFNToRecFN.scala:164:56, :169:67] wire _roundedSig_T_3 = roundPosBit; // @[RoundAnyRawFNToRecFN.scala:164:56, :175:49] wire [26:0] _anyRoundExtra_T = adjustedSig & shiftedRoundMask; // @[RoundAnyRawFNToRecFN.scala:114:22, :162:53, :165:42] wire anyRoundExtra = |_anyRoundExtra_T; // @[RoundAnyRawFNToRecFN.scala:165:{42,62}] wire anyRound = roundPosBit | anyRoundExtra; // @[RoundAnyRawFNToRecFN.scala:164:56, :165:62, :166:36] wire roundIncr = _roundIncr_T_1; // @[RoundAnyRawFNToRecFN.scala:169:67, :170:31] wire [26:0] _roundedSig_T = adjustedSig | roundMask; // @[RoundAnyRawFNToRecFN.scala:114:22, :159:42, :174:32] wire [24:0] _roundedSig_T_1 = _roundedSig_T[26:2]; // @[RoundAnyRawFNToRecFN.scala:174:{32,44}] wire [25:0] _roundedSig_T_2 = {1'h0, _roundedSig_T_1} + 26'h1; // @[RoundAnyRawFNToRecFN.scala:174:{44,49}] wire _roundedSig_T_4 = ~anyRoundExtra; // @[RoundAnyRawFNToRecFN.scala:165:62, :176:30] wire _roundedSig_T_5 = _roundedSig_T_3 & _roundedSig_T_4; // @[RoundAnyRawFNToRecFN.scala:175:{49,64}, :176:30] wire [25:0] _roundedSig_T_6 = roundMask[26:1]; // @[RoundAnyRawFNToRecFN.scala:159:42, :177:35] wire [25:0] _roundedSig_T_7 = _roundedSig_T_5 ? 
_roundedSig_T_6 : 26'h0; // @[RoundAnyRawFNToRecFN.scala:175:{25,64}, :177:35] wire [25:0] _roundedSig_T_8 = ~_roundedSig_T_7; // @[RoundAnyRawFNToRecFN.scala:175:{21,25}] wire [25:0] _roundedSig_T_9 = _roundedSig_T_2 & _roundedSig_T_8; // @[RoundAnyRawFNToRecFN.scala:174:{49,57}, :175:21] wire [26:0] _roundedSig_T_10 = ~roundMask; // @[RoundAnyRawFNToRecFN.scala:159:42, :180:32] wire [26:0] _roundedSig_T_11 = adjustedSig & _roundedSig_T_10; // @[RoundAnyRawFNToRecFN.scala:114:22, :180:{30,32}] wire [24:0] _roundedSig_T_12 = _roundedSig_T_11[26:2]; // @[RoundAnyRawFNToRecFN.scala:180:{30,43}] wire [25:0] _roundedSig_T_14 = roundPosMask[26:1]; // @[RoundAnyRawFNToRecFN.scala:163:46, :181:67] wire [25:0] _roundedSig_T_16 = {1'h0, _roundedSig_T_12}; // @[RoundAnyRawFNToRecFN.scala:180:{43,47}] wire [25:0] roundedSig = roundIncr ? _roundedSig_T_9 : _roundedSig_T_16; // @[RoundAnyRawFNToRecFN.scala:170:31, :173:16, :174:57, :180:47] wire [1:0] _sRoundedExp_T = roundedSig[25:24]; // @[RoundAnyRawFNToRecFN.scala:173:16, :185:54] wire [2:0] _sRoundedExp_T_1 = {1'h0, _sRoundedExp_T}; // @[RoundAnyRawFNToRecFN.scala:185:{54,76}] wire [10:0] sRoundedExp = {io_in_sExp_0[9], io_in_sExp_0} + {{8{_sRoundedExp_T_1[2]}}, _sRoundedExp_T_1}; // @[RoundAnyRawFNToRecFN.scala:48:5, :185:{40,76}] assign _common_expOut_T = sRoundedExp[8:0]; // @[RoundAnyRawFNToRecFN.scala:185:40, :187:37] assign common_expOut = _common_expOut_T; // @[RoundAnyRawFNToRecFN.scala:122:31, :187:37] wire [22:0] _common_fractOut_T = roundedSig[23:1]; // @[RoundAnyRawFNToRecFN.scala:173:16, :190:27] wire [22:0] _common_fractOut_T_1 = roundedSig[22:0]; // @[RoundAnyRawFNToRecFN.scala:173:16, :191:27] assign _common_fractOut_T_2 = doShiftSigDown1 ? _common_fractOut_T : _common_fractOut_T_1; // @[RoundAnyRawFNToRecFN.scala:120:57, :189:16, :190:27, :191:27] assign common_fractOut = _common_fractOut_T_2; // @[RoundAnyRawFNToRecFN.scala:123:31, :189:16] wire [3:0] _common_overflow_T = sRoundedExp[10:7]; // @[RoundAnyRawFNToRecFN.scala:185:40, :196:30] assign _common_overflow_T_1 = $signed(_common_overflow_T) > 4'sh2; // @[RoundAnyRawFNToRecFN.scala:196:{30,50}] assign common_overflow = _common_overflow_T_1; // @[RoundAnyRawFNToRecFN.scala:124:37, :196:50] assign _common_totalUnderflow_T = $signed(sRoundedExp) < 11'sh6B; // @[RoundAnyRawFNToRecFN.scala:185:40, :200:31] assign common_totalUnderflow = _common_totalUnderflow_T; // @[RoundAnyRawFNToRecFN.scala:125:37, :200:31] wire _unboundedRange_roundPosBit_T = adjustedSig[2]; // @[RoundAnyRawFNToRecFN.scala:114:22, :203:45] wire _unboundedRange_anyRound_T = adjustedSig[2]; // @[RoundAnyRawFNToRecFN.scala:114:22, :203:45, :205:44] wire _unboundedRange_roundPosBit_T_1 = adjustedSig[1]; // @[RoundAnyRawFNToRecFN.scala:114:22, :203:61] wire unboundedRange_roundPosBit = doShiftSigDown1 ? 
_unboundedRange_roundPosBit_T : _unboundedRange_roundPosBit_T_1; // @[RoundAnyRawFNToRecFN.scala:120:57, :203:{16,45,61}] wire _unboundedRange_roundIncr_T_1 = unboundedRange_roundPosBit; // @[RoundAnyRawFNToRecFN.scala:203:16, :207:67] wire _unboundedRange_anyRound_T_1 = doShiftSigDown1 & _unboundedRange_anyRound_T; // @[RoundAnyRawFNToRecFN.scala:120:57, :205:{30,44}] wire [1:0] _unboundedRange_anyRound_T_2 = adjustedSig[1:0]; // @[RoundAnyRawFNToRecFN.scala:114:22, :205:63] wire _unboundedRange_anyRound_T_3 = |_unboundedRange_anyRound_T_2; // @[RoundAnyRawFNToRecFN.scala:205:{63,70}] wire unboundedRange_anyRound = _unboundedRange_anyRound_T_1 | _unboundedRange_anyRound_T_3; // @[RoundAnyRawFNToRecFN.scala:205:{30,49,70}] wire unboundedRange_roundIncr = _unboundedRange_roundIncr_T_1; // @[RoundAnyRawFNToRecFN.scala:207:67, :208:46] wire _roundCarry_T = roundedSig[25]; // @[RoundAnyRawFNToRecFN.scala:173:16, :212:27] wire _roundCarry_T_1 = roundedSig[24]; // @[RoundAnyRawFNToRecFN.scala:173:16, :213:27] wire roundCarry = doShiftSigDown1 ? _roundCarry_T : _roundCarry_T_1; // @[RoundAnyRawFNToRecFN.scala:120:57, :211:16, :212:27, :213:27] wire [1:0] _common_underflow_T = io_in_sExp_0[9:8]; // @[RoundAnyRawFNToRecFN.scala:48:5, :220:49] wire _common_underflow_T_1 = _common_underflow_T != 2'h1; // @[RoundAnyRawFNToRecFN.scala:220:{49,64}] wire _common_underflow_T_2 = anyRound & _common_underflow_T_1; // @[RoundAnyRawFNToRecFN.scala:166:36, :220:{32,64}] wire _common_underflow_T_3 = roundMask[3]; // @[RoundAnyRawFNToRecFN.scala:159:42, :221:57] wire _common_underflow_T_9 = roundMask[3]; // @[RoundAnyRawFNToRecFN.scala:159:42, :221:57, :225:49] wire _common_underflow_T_4 = roundMask[2]; // @[RoundAnyRawFNToRecFN.scala:159:42, :221:71] wire _common_underflow_T_5 = doShiftSigDown1 ? _common_underflow_T_3 : _common_underflow_T_4; // @[RoundAnyRawFNToRecFN.scala:120:57, :221:{30,57,71}] wire _common_underflow_T_6 = _common_underflow_T_2 & _common_underflow_T_5; // @[RoundAnyRawFNToRecFN.scala:220:{32,72}, :221:30] wire _common_underflow_T_8 = roundMask[4]; // @[RoundAnyRawFNToRecFN.scala:159:42, :224:49] wire _common_underflow_T_10 = doShiftSigDown1 ? 
_common_underflow_T_8 : _common_underflow_T_9; // @[RoundAnyRawFNToRecFN.scala:120:57, :223:39, :224:49, :225:49] wire _common_underflow_T_11 = ~_common_underflow_T_10; // @[RoundAnyRawFNToRecFN.scala:223:{34,39}] wire _common_underflow_T_12 = _common_underflow_T_11; // @[RoundAnyRawFNToRecFN.scala:222:77, :223:34] wire _common_underflow_T_13 = _common_underflow_T_12 & roundCarry; // @[RoundAnyRawFNToRecFN.scala:211:16, :222:77, :226:38] wire _common_underflow_T_14 = _common_underflow_T_13 & roundPosBit; // @[RoundAnyRawFNToRecFN.scala:164:56, :226:38, :227:45] wire _common_underflow_T_15 = _common_underflow_T_14 & unboundedRange_roundIncr; // @[RoundAnyRawFNToRecFN.scala:208:46, :227:{45,60}] wire _common_underflow_T_16 = ~_common_underflow_T_15; // @[RoundAnyRawFNToRecFN.scala:222:27, :227:60] wire _common_underflow_T_17 = _common_underflow_T_6 & _common_underflow_T_16; // @[RoundAnyRawFNToRecFN.scala:220:72, :221:76, :222:27] assign _common_underflow_T_18 = common_totalUnderflow | _common_underflow_T_17; // @[RoundAnyRawFNToRecFN.scala:125:37, :217:40, :221:76] assign common_underflow = _common_underflow_T_18; // @[RoundAnyRawFNToRecFN.scala:126:37, :217:40] assign _common_inexact_T = common_totalUnderflow | anyRound; // @[RoundAnyRawFNToRecFN.scala:125:37, :166:36, :230:49] assign common_inexact = _common_inexact_T; // @[RoundAnyRawFNToRecFN.scala:127:37, :230:49] wire isNaNOut = io_invalidExc_0 | io_in_isNaN_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :235:34] wire _commonCase_T = ~isNaNOut; // @[RoundAnyRawFNToRecFN.scala:235:34, :237:22] wire _commonCase_T_1 = ~notNaN_isSpecialInfOut; // @[RoundAnyRawFNToRecFN.scala:236:49, :237:36] wire _commonCase_T_2 = _commonCase_T & _commonCase_T_1; // @[RoundAnyRawFNToRecFN.scala:237:{22,33,36}] wire _commonCase_T_3 = ~io_in_isZero_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :237:64] wire commonCase = _commonCase_T_2 & _commonCase_T_3; // @[RoundAnyRawFNToRecFN.scala:237:{33,61,64}] wire overflow = commonCase & common_overflow; // @[RoundAnyRawFNToRecFN.scala:124:37, :237:61, :238:32] wire _notNaN_isInfOut_T = overflow; // @[RoundAnyRawFNToRecFN.scala:238:32, :248:45] wire underflow = commonCase & common_underflow; // @[RoundAnyRawFNToRecFN.scala:126:37, :237:61, :239:32] wire _inexact_T = commonCase & common_inexact; // @[RoundAnyRawFNToRecFN.scala:127:37, :237:61, :240:43] wire inexact = overflow | _inexact_T; // @[RoundAnyRawFNToRecFN.scala:238:32, :240:{28,43}] wire _pegMinNonzeroMagOut_T = commonCase & common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:125:37, :237:61, :245:20] wire notNaN_isInfOut = notNaN_isSpecialInfOut | _notNaN_isInfOut_T; // @[RoundAnyRawFNToRecFN.scala:236:49, :248:{32,45}] wire signOut = ~isNaNOut & io_in_sign_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :235:34, :250:22] wire _expOut_T = io_in_isZero_0 | common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:48:5, :125:37, :253:32] wire [8:0] _expOut_T_1 = _expOut_T ? 
9'h1C0 : 9'h0; // @[RoundAnyRawFNToRecFN.scala:253:{18,32}] wire [8:0] _expOut_T_2 = ~_expOut_T_1; // @[RoundAnyRawFNToRecFN.scala:253:{14,18}] wire [8:0] _expOut_T_3 = common_expOut & _expOut_T_2; // @[RoundAnyRawFNToRecFN.scala:122:31, :252:24, :253:14] wire [8:0] _expOut_T_7 = _expOut_T_3; // @[RoundAnyRawFNToRecFN.scala:252:24, :256:17] wire [8:0] _expOut_T_10 = _expOut_T_7; // @[RoundAnyRawFNToRecFN.scala:256:17, :260:17] wire [8:0] _expOut_T_11 = {2'h0, notNaN_isInfOut, 6'h0}; // @[RoundAnyRawFNToRecFN.scala:248:32, :265:18] wire [8:0] _expOut_T_12 = ~_expOut_T_11; // @[RoundAnyRawFNToRecFN.scala:265:{14,18}] wire [8:0] _expOut_T_13 = _expOut_T_10 & _expOut_T_12; // @[RoundAnyRawFNToRecFN.scala:260:17, :264:17, :265:14] wire [8:0] _expOut_T_15 = _expOut_T_13; // @[RoundAnyRawFNToRecFN.scala:264:17, :268:18] wire [8:0] _expOut_T_17 = _expOut_T_15; // @[RoundAnyRawFNToRecFN.scala:268:18, :272:15] wire [8:0] _expOut_T_18 = notNaN_isInfOut ? 9'h180 : 9'h0; // @[RoundAnyRawFNToRecFN.scala:248:32, :277:16] wire [8:0] _expOut_T_19 = _expOut_T_17 | _expOut_T_18; // @[RoundAnyRawFNToRecFN.scala:272:15, :276:15, :277:16] wire [8:0] _expOut_T_20 = isNaNOut ? 9'h1C0 : 9'h0; // @[RoundAnyRawFNToRecFN.scala:235:34, :278:16] wire [8:0] expOut = _expOut_T_19 | _expOut_T_20; // @[RoundAnyRawFNToRecFN.scala:276:15, :277:73, :278:16] wire _fractOut_T = isNaNOut | io_in_isZero_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :235:34, :280:22] wire _fractOut_T_1 = _fractOut_T | common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:125:37, :280:{22,38}] wire [22:0] _fractOut_T_2 = {isNaNOut, 22'h0}; // @[RoundAnyRawFNToRecFN.scala:235:34, :281:16] wire [22:0] _fractOut_T_3 = _fractOut_T_1 ? _fractOut_T_2 : common_fractOut; // @[RoundAnyRawFNToRecFN.scala:123:31, :280:{12,38}, :281:16] wire [22:0] fractOut = _fractOut_T_3; // @[RoundAnyRawFNToRecFN.scala:280:12, :283:11] wire [9:0] _io_out_T = {signOut, expOut}; // @[RoundAnyRawFNToRecFN.scala:250:22, :277:73, :286:23] assign _io_out_T_1 = {_io_out_T, fractOut}; // @[RoundAnyRawFNToRecFN.scala:283:11, :286:{23,33}] assign io_out_0 = _io_out_T_1; // @[RoundAnyRawFNToRecFN.scala:48:5, :286:33] wire [1:0] _io_exceptionFlags_T = {io_invalidExc_0, 1'h0}; // @[RoundAnyRawFNToRecFN.scala:48:5, :288:23] wire [2:0] _io_exceptionFlags_T_1 = {_io_exceptionFlags_T, overflow}; // @[RoundAnyRawFNToRecFN.scala:238:32, :288:{23,41}] wire [3:0] _io_exceptionFlags_T_2 = {_io_exceptionFlags_T_1, underflow}; // @[RoundAnyRawFNToRecFN.scala:239:32, :288:{41,53}] assign _io_exceptionFlags_T_3 = {_io_exceptionFlags_T_2, inexact}; // @[RoundAnyRawFNToRecFN.scala:240:28, :288:{53,66}] assign io_exceptionFlags_0 = _io_exceptionFlags_T_3; // @[RoundAnyRawFNToRecFN.scala:48:5, :288:66] assign io_out = io_out_0; // @[RoundAnyRawFNToRecFN.scala:48:5] assign io_exceptionFlags = io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:48:5] endmodule
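In the instance above, io_roundingMode is tied to 3'h0 and io_detectTininess to 1'h1, so all of the roundingMode_* selects constant-fold away. A minimal sketch of how such a specialized instance can arise at a Chisel instantiation site is shown below; the wrapper name RoundRNE is hypothetical (it is not part of hardfloat) and only illustrates driving the mode inputs with constants from hardfloat's consts.

import chisel3._
import hardfloat._
import hardfloat.consts._

// Hypothetical wrapper: hard-wiring roundingMode and detectTininess is what
// lets the compiler fold the roundingMode_* wires seen in the Verilog above
// into constants.
class RoundRNE(expWidth: Int, sigWidth: Int) extends Module {
  val io = IO(new Bundle {
    val invalidExc     = Input(Bool())
    val in             = Input(new RawFloat(expWidth, sigWidth + 2))
    val out            = Output(Bits((expWidth + sigWidth + 1).W))
    val exceptionFlags = Output(Bits(5.W))
  })
  val rounder = Module(new RoundRawFNToRecFN(expWidth, sigWidth, 0))
  rounder.io.invalidExc     := io.invalidExc
  rounder.io.infiniteExc    := false.B
  rounder.io.in             := io.in
  rounder.io.roundingMode   := round_near_even        // 3'b000, matching io_roundingMode = 3'h0
  rounder.io.detectTininess := tininess_afterRounding // matching io_detectTininess = 1'h1
  io.out            := rounder.io.out
  io.exceptionFlags := rounder.io.exceptionFlags
}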
Generate the Verilog code corresponding to the following Chisel files. File SwitchAllocator.scala: package constellation.router import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.{Field, Parameters} import freechips.rocketchip.util._ import constellation.channel._ class SwitchAllocReq(val outParams: Seq[ChannelParams], val egressParams: Seq[EgressChannelParams]) (implicit val p: Parameters) extends Bundle with HasRouterOutputParams { val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) }) val tail = Bool() } class SwitchArbiter(inN: Int, outN: Int, outParams: Seq[ChannelParams], egressParams: Seq[EgressChannelParams])(implicit val p: Parameters) extends Module { val io = IO(new Bundle { val in = Flipped(Vec(inN, Decoupled(new SwitchAllocReq(outParams, egressParams)))) val out = Vec(outN, Decoupled(new SwitchAllocReq(outParams, egressParams))) val chosen_oh = Vec(outN, Output(UInt(inN.W))) }) val lock = Seq.fill(outN) { RegInit(0.U(inN.W)) } val unassigned = Cat(io.in.map(_.valid).reverse) & ~(lock.reduce(_|_)) val mask = RegInit(0.U(inN.W)) val choices = Wire(Vec(outN, UInt(inN.W))) var sel = PriorityEncoderOH(Cat(unassigned, unassigned & ~mask)) for (i <- 0 until outN) { choices(i) := sel | (sel >> inN) sel = PriorityEncoderOH(unassigned & ~choices(i)) } io.in.foreach(_.ready := false.B) var chosens = 0.U(inN.W) val in_tails = Cat(io.in.map(_.bits.tail).reverse) for (i <- 0 until outN) { val in_valids = Cat((0 until inN).map { j => io.in(j).valid && !chosens(j) }.reverse) val chosen = Mux((in_valids & lock(i) & ~chosens).orR, lock(i), choices(i)) io.chosen_oh(i) := chosen io.out(i).valid := (in_valids & chosen).orR io.out(i).bits := Mux1H(chosen, io.in.map(_.bits)) for (j <- 0 until inN) { when (chosen(j) && io.out(i).ready) { io.in(j).ready := true.B } } chosens = chosens | chosen when (io.out(i).fire) { lock(i) := chosen & ~in_tails } } when (io.out(0).fire) { mask := (0 until inN).map { i => (io.chosen_oh(0) >> i) }.reduce(_|_) } .otherwise { mask := Mux(~mask === 0.U, 0.U, (mask << 1) | 1.U(1.W)) } } class SwitchAllocator( val routerParams: RouterParams, val inParams: Seq[ChannelParams], val outParams: Seq[ChannelParams], val ingressParams: Seq[IngressChannelParams], val egressParams: Seq[EgressChannelParams] )(implicit val p: Parameters) extends Module with HasRouterParams with HasRouterInputParams with HasRouterOutputParams { val io = IO(new Bundle { val req = MixedVec(allInParams.map(u => Vec(u.destSpeedup, Flipped(Decoupled(new SwitchAllocReq(outParams, egressParams)))))) val credit_alloc = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Output(new OutputCreditAlloc))}) val switch_sel = MixedVec(allOutParams.map { o => Vec(o.srcSpeedup, MixedVec(allInParams.map { i => Vec(i.destSpeedup, Output(Bool())) })) }) }) val nInputChannels = allInParams.map(_.nVirtualChannels).sum val arbs = allOutParams.map { oP => Module(new SwitchArbiter( allInParams.map(_.destSpeedup).reduce(_+_), oP.srcSpeedup, outParams, egressParams ))} arbs.foreach(_.io.out.foreach(_.ready := true.B)) var idx = 0 io.req.foreach(_.foreach { o => val fires = Wire(Vec(arbs.size, Bool())) arbs.zipWithIndex.foreach { case (a,i) => a.io.in(idx).valid := o.valid && o.bits.vc_sel(i).reduce(_||_) a.io.in(idx).bits := o.bits fires(i) := a.io.in(idx).fire } o.ready := fires.reduce(_||_) idx += 1 }) for (i <- 0 until nAllOutputs) { for (j <- 0 until allOutParams(i).srcSpeedup) { idx = 0 for (m <- 0 until nAllInputs) { for (n <- 0 until allInParams(m).destSpeedup) { 
io.switch_sel(i)(j)(m)(n) := arbs(i).io.in(idx).valid && arbs(i).io.chosen_oh(j)(idx) && arbs(i).io.out(j).valid idx += 1 } } } } io.credit_alloc.foreach(_.foreach(_.alloc := false.B)) io.credit_alloc.foreach(_.foreach(_.tail := false.B)) (arbs zip io.credit_alloc).zipWithIndex.map { case ((a,i),t) => for (j <- 0 until i.size) { for (k <- 0 until a.io.out.size) { when (a.io.out(k).valid && a.io.out(k).bits.vc_sel(t)(j)) { i(j).alloc := true.B i(j).tail := a.io.out(k).bits.tail } } } } }
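The grant selection inside SwitchArbiter combines a per-output lock (for in-flight multi-flit packets), a rotating priority mask, and PriorityEncoderOH over a doubled request vector. A small software model of just the priority step is sketched below (the names are assumptions for the sketch, not part of constellation); it mirrors sel = PriorityEncoderOH(Cat(unassigned, unassigned & ~mask)) followed by the fold sel | (sel >> inN).

// Illustrative software model of the rotating-priority choice in SwitchArbiter.
object SwitchArbiterChoiceModel extends App {
  // One-hot of the lowest set bit, like PriorityEncoderOH.
  def priorityOneHot(bits: Int): Int = Integer.lowestOneBit(bits)

  // unassigned: requesters not locked to another grant; mask: thermometer mask of
  // inputs at or below the previous grant (they lose priority this round).
  def choose(unassigned: Int, mask: Int, inN: Int): Int = {
    val doubled = (unassigned << inN) | (unassigned & ~mask) // Cat(unassigned, unassigned & ~mask)
    val sel     = priorityOneHot(doubled)
    (sel | (sel >> inN)) & ((1 << inN) - 1)                  // fold the two halves back together
  }

  // Three requesters all asserting; input 0 won last round (mask = 0b001),
  // so the arbiter now prefers input 1.
  println(choose(unassigned = 0x7, mask = 0x1, inN = 3).toBinaryString) // prints "10"
}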
module SwitchAllocator_55( // @[SwitchAllocator.scala:64:7] input clock, // @[SwitchAllocator.scala:64:7] input reset, // @[SwitchAllocator.scala:64:7] output io_req_2_0_ready, // @[SwitchAllocator.scala:74:14] input io_req_2_0_valid, // @[SwitchAllocator.scala:74:14] input io_req_2_0_bits_vc_sel_2_2, // @[SwitchAllocator.scala:74:14] input io_req_2_0_bits_vc_sel_2_3, // @[SwitchAllocator.scala:74:14] input io_req_2_0_bits_vc_sel_2_4, // @[SwitchAllocator.scala:74:14] input io_req_2_0_bits_vc_sel_2_5, // @[SwitchAllocator.scala:74:14] input io_req_2_0_bits_vc_sel_2_6, // @[SwitchAllocator.scala:74:14] input io_req_2_0_bits_vc_sel_2_7, // @[SwitchAllocator.scala:74:14] input io_req_2_0_bits_vc_sel_2_8, // @[SwitchAllocator.scala:74:14] input io_req_2_0_bits_vc_sel_2_9, // @[SwitchAllocator.scala:74:14] input io_req_2_0_bits_vc_sel_1_2, // @[SwitchAllocator.scala:74:14] input io_req_2_0_bits_vc_sel_1_3, // @[SwitchAllocator.scala:74:14] input io_req_2_0_bits_vc_sel_1_4, // @[SwitchAllocator.scala:74:14] input io_req_2_0_bits_vc_sel_1_5, // @[SwitchAllocator.scala:74:14] input io_req_2_0_bits_vc_sel_1_6, // @[SwitchAllocator.scala:74:14] input io_req_2_0_bits_vc_sel_1_7, // @[SwitchAllocator.scala:74:14] input io_req_2_0_bits_vc_sel_1_8, // @[SwitchAllocator.scala:74:14] input io_req_2_0_bits_vc_sel_1_9, // @[SwitchAllocator.scala:74:14] input io_req_2_0_bits_vc_sel_0_2, // @[SwitchAllocator.scala:74:14] input io_req_2_0_bits_vc_sel_0_3, // @[SwitchAllocator.scala:74:14] input io_req_2_0_bits_vc_sel_0_4, // @[SwitchAllocator.scala:74:14] input io_req_2_0_bits_vc_sel_0_5, // @[SwitchAllocator.scala:74:14] input io_req_2_0_bits_vc_sel_0_6, // @[SwitchAllocator.scala:74:14] input io_req_2_0_bits_vc_sel_0_7, // @[SwitchAllocator.scala:74:14] input io_req_2_0_bits_vc_sel_0_8, // @[SwitchAllocator.scala:74:14] input io_req_2_0_bits_vc_sel_0_9, // @[SwitchAllocator.scala:74:14] input io_req_2_0_bits_tail, // @[SwitchAllocator.scala:74:14] output io_req_1_0_ready, // @[SwitchAllocator.scala:74:14] input io_req_1_0_valid, // @[SwitchAllocator.scala:74:14] input io_req_1_0_bits_vc_sel_2_2, // @[SwitchAllocator.scala:74:14] input io_req_1_0_bits_vc_sel_2_3, // @[SwitchAllocator.scala:74:14] input io_req_1_0_bits_vc_sel_2_4, // @[SwitchAllocator.scala:74:14] input io_req_1_0_bits_vc_sel_2_5, // @[SwitchAllocator.scala:74:14] input io_req_1_0_bits_vc_sel_2_6, // @[SwitchAllocator.scala:74:14] input io_req_1_0_bits_vc_sel_2_7, // @[SwitchAllocator.scala:74:14] input io_req_1_0_bits_vc_sel_2_8, // @[SwitchAllocator.scala:74:14] input io_req_1_0_bits_vc_sel_2_9, // @[SwitchAllocator.scala:74:14] input io_req_1_0_bits_vc_sel_1_2, // @[SwitchAllocator.scala:74:14] input io_req_1_0_bits_vc_sel_1_3, // @[SwitchAllocator.scala:74:14] input io_req_1_0_bits_vc_sel_1_4, // @[SwitchAllocator.scala:74:14] input io_req_1_0_bits_vc_sel_1_5, // @[SwitchAllocator.scala:74:14] input io_req_1_0_bits_vc_sel_1_6, // @[SwitchAllocator.scala:74:14] input io_req_1_0_bits_vc_sel_1_7, // @[SwitchAllocator.scala:74:14] input io_req_1_0_bits_vc_sel_1_8, // @[SwitchAllocator.scala:74:14] input io_req_1_0_bits_vc_sel_1_9, // @[SwitchAllocator.scala:74:14] input io_req_1_0_bits_vc_sel_0_2, // @[SwitchAllocator.scala:74:14] input io_req_1_0_bits_vc_sel_0_3, // @[SwitchAllocator.scala:74:14] input io_req_1_0_bits_vc_sel_0_4, // @[SwitchAllocator.scala:74:14] input io_req_1_0_bits_vc_sel_0_5, // @[SwitchAllocator.scala:74:14] input io_req_1_0_bits_vc_sel_0_6, // @[SwitchAllocator.scala:74:14] input io_req_1_0_bits_vc_sel_0_7, // 
@[SwitchAllocator.scala:74:14] input io_req_1_0_bits_vc_sel_0_8, // @[SwitchAllocator.scala:74:14] input io_req_1_0_bits_vc_sel_0_9, // @[SwitchAllocator.scala:74:14] input io_req_1_0_bits_tail, // @[SwitchAllocator.scala:74:14] output io_req_0_0_ready, // @[SwitchAllocator.scala:74:14] input io_req_0_0_valid, // @[SwitchAllocator.scala:74:14] input io_req_0_0_bits_vc_sel_2_2, // @[SwitchAllocator.scala:74:14] input io_req_0_0_bits_vc_sel_2_3, // @[SwitchAllocator.scala:74:14] input io_req_0_0_bits_vc_sel_2_4, // @[SwitchAllocator.scala:74:14] input io_req_0_0_bits_vc_sel_2_5, // @[SwitchAllocator.scala:74:14] input io_req_0_0_bits_vc_sel_2_6, // @[SwitchAllocator.scala:74:14] input io_req_0_0_bits_vc_sel_2_7, // @[SwitchAllocator.scala:74:14] input io_req_0_0_bits_vc_sel_2_8, // @[SwitchAllocator.scala:74:14] input io_req_0_0_bits_vc_sel_2_9, // @[SwitchAllocator.scala:74:14] input io_req_0_0_bits_vc_sel_1_2, // @[SwitchAllocator.scala:74:14] input io_req_0_0_bits_vc_sel_1_3, // @[SwitchAllocator.scala:74:14] input io_req_0_0_bits_vc_sel_1_4, // @[SwitchAllocator.scala:74:14] input io_req_0_0_bits_vc_sel_1_5, // @[SwitchAllocator.scala:74:14] input io_req_0_0_bits_vc_sel_1_6, // @[SwitchAllocator.scala:74:14] input io_req_0_0_bits_vc_sel_1_7, // @[SwitchAllocator.scala:74:14] input io_req_0_0_bits_vc_sel_1_8, // @[SwitchAllocator.scala:74:14] input io_req_0_0_bits_vc_sel_1_9, // @[SwitchAllocator.scala:74:14] input io_req_0_0_bits_vc_sel_0_2, // @[SwitchAllocator.scala:74:14] input io_req_0_0_bits_vc_sel_0_3, // @[SwitchAllocator.scala:74:14] input io_req_0_0_bits_vc_sel_0_4, // @[SwitchAllocator.scala:74:14] input io_req_0_0_bits_vc_sel_0_5, // @[SwitchAllocator.scala:74:14] input io_req_0_0_bits_vc_sel_0_6, // @[SwitchAllocator.scala:74:14] input io_req_0_0_bits_vc_sel_0_7, // @[SwitchAllocator.scala:74:14] input io_req_0_0_bits_vc_sel_0_8, // @[SwitchAllocator.scala:74:14] input io_req_0_0_bits_vc_sel_0_9, // @[SwitchAllocator.scala:74:14] input io_req_0_0_bits_tail, // @[SwitchAllocator.scala:74:14] output io_credit_alloc_2_2_alloc, // @[SwitchAllocator.scala:74:14] output io_credit_alloc_2_3_alloc, // @[SwitchAllocator.scala:74:14] output io_credit_alloc_2_4_alloc, // @[SwitchAllocator.scala:74:14] output io_credit_alloc_2_5_alloc, // @[SwitchAllocator.scala:74:14] output io_credit_alloc_2_6_alloc, // @[SwitchAllocator.scala:74:14] output io_credit_alloc_2_7_alloc, // @[SwitchAllocator.scala:74:14] output io_credit_alloc_2_8_alloc, // @[SwitchAllocator.scala:74:14] output io_credit_alloc_2_9_alloc, // @[SwitchAllocator.scala:74:14] output io_credit_alloc_1_2_alloc, // @[SwitchAllocator.scala:74:14] output io_credit_alloc_1_3_alloc, // @[SwitchAllocator.scala:74:14] output io_credit_alloc_1_4_alloc, // @[SwitchAllocator.scala:74:14] output io_credit_alloc_1_5_alloc, // @[SwitchAllocator.scala:74:14] output io_credit_alloc_1_6_alloc, // @[SwitchAllocator.scala:74:14] output io_credit_alloc_1_7_alloc, // @[SwitchAllocator.scala:74:14] output io_credit_alloc_1_8_alloc, // @[SwitchAllocator.scala:74:14] output io_credit_alloc_1_9_alloc, // @[SwitchAllocator.scala:74:14] output io_credit_alloc_0_2_alloc, // @[SwitchAllocator.scala:74:14] output io_credit_alloc_0_3_alloc, // @[SwitchAllocator.scala:74:14] output io_credit_alloc_0_4_alloc, // @[SwitchAllocator.scala:74:14] output io_credit_alloc_0_5_alloc, // @[SwitchAllocator.scala:74:14] output io_credit_alloc_0_6_alloc, // @[SwitchAllocator.scala:74:14] output io_credit_alloc_0_7_alloc, // @[SwitchAllocator.scala:74:14] output 
io_credit_alloc_0_8_alloc, // @[SwitchAllocator.scala:74:14] output io_credit_alloc_0_9_alloc, // @[SwitchAllocator.scala:74:14] output io_switch_sel_2_0_2_0, // @[SwitchAllocator.scala:74:14] output io_switch_sel_2_0_1_0, // @[SwitchAllocator.scala:74:14] output io_switch_sel_2_0_0_0, // @[SwitchAllocator.scala:74:14] output io_switch_sel_1_0_2_0, // @[SwitchAllocator.scala:74:14] output io_switch_sel_1_0_1_0, // @[SwitchAllocator.scala:74:14] output io_switch_sel_1_0_0_0, // @[SwitchAllocator.scala:74:14] output io_switch_sel_0_0_2_0, // @[SwitchAllocator.scala:74:14] output io_switch_sel_0_0_1_0, // @[SwitchAllocator.scala:74:14] output io_switch_sel_0_0_0_0 // @[SwitchAllocator.scala:74:14] ); wire _arbs_2_io_in_0_ready; // @[SwitchAllocator.scala:83:45] wire _arbs_2_io_in_1_ready; // @[SwitchAllocator.scala:83:45] wire _arbs_2_io_in_2_ready; // @[SwitchAllocator.scala:83:45] wire _arbs_2_io_out_0_valid; // @[SwitchAllocator.scala:83:45] wire _arbs_2_io_out_0_bits_vc_sel_2_2; // @[SwitchAllocator.scala:83:45] wire _arbs_2_io_out_0_bits_vc_sel_2_3; // @[SwitchAllocator.scala:83:45] wire _arbs_2_io_out_0_bits_vc_sel_2_4; // @[SwitchAllocator.scala:83:45] wire _arbs_2_io_out_0_bits_vc_sel_2_5; // @[SwitchAllocator.scala:83:45] wire _arbs_2_io_out_0_bits_vc_sel_2_6; // @[SwitchAllocator.scala:83:45] wire _arbs_2_io_out_0_bits_vc_sel_2_7; // @[SwitchAllocator.scala:83:45] wire _arbs_2_io_out_0_bits_vc_sel_2_8; // @[SwitchAllocator.scala:83:45] wire _arbs_2_io_out_0_bits_vc_sel_2_9; // @[SwitchAllocator.scala:83:45] wire [2:0] _arbs_2_io_chosen_oh_0; // @[SwitchAllocator.scala:83:45] wire _arbs_1_io_in_0_ready; // @[SwitchAllocator.scala:83:45] wire _arbs_1_io_in_1_ready; // @[SwitchAllocator.scala:83:45] wire _arbs_1_io_in_2_ready; // @[SwitchAllocator.scala:83:45] wire _arbs_1_io_out_0_valid; // @[SwitchAllocator.scala:83:45] wire _arbs_1_io_out_0_bits_vc_sel_1_2; // @[SwitchAllocator.scala:83:45] wire _arbs_1_io_out_0_bits_vc_sel_1_3; // @[SwitchAllocator.scala:83:45] wire _arbs_1_io_out_0_bits_vc_sel_1_4; // @[SwitchAllocator.scala:83:45] wire _arbs_1_io_out_0_bits_vc_sel_1_5; // @[SwitchAllocator.scala:83:45] wire _arbs_1_io_out_0_bits_vc_sel_1_6; // @[SwitchAllocator.scala:83:45] wire _arbs_1_io_out_0_bits_vc_sel_1_7; // @[SwitchAllocator.scala:83:45] wire _arbs_1_io_out_0_bits_vc_sel_1_8; // @[SwitchAllocator.scala:83:45] wire _arbs_1_io_out_0_bits_vc_sel_1_9; // @[SwitchAllocator.scala:83:45] wire [2:0] _arbs_1_io_chosen_oh_0; // @[SwitchAllocator.scala:83:45] wire _arbs_0_io_in_0_ready; // @[SwitchAllocator.scala:83:45] wire _arbs_0_io_in_1_ready; // @[SwitchAllocator.scala:83:45] wire _arbs_0_io_in_2_ready; // @[SwitchAllocator.scala:83:45] wire _arbs_0_io_out_0_valid; // @[SwitchAllocator.scala:83:45] wire _arbs_0_io_out_0_bits_vc_sel_0_2; // @[SwitchAllocator.scala:83:45] wire _arbs_0_io_out_0_bits_vc_sel_0_3; // @[SwitchAllocator.scala:83:45] wire _arbs_0_io_out_0_bits_vc_sel_0_4; // @[SwitchAllocator.scala:83:45] wire _arbs_0_io_out_0_bits_vc_sel_0_5; // @[SwitchAllocator.scala:83:45] wire _arbs_0_io_out_0_bits_vc_sel_0_6; // @[SwitchAllocator.scala:83:45] wire _arbs_0_io_out_0_bits_vc_sel_0_7; // @[SwitchAllocator.scala:83:45] wire _arbs_0_io_out_0_bits_vc_sel_0_8; // @[SwitchAllocator.scala:83:45] wire _arbs_0_io_out_0_bits_vc_sel_0_9; // @[SwitchAllocator.scala:83:45] wire [2:0] _arbs_0_io_chosen_oh_0; // @[SwitchAllocator.scala:83:45] wire arbs_0_io_in_0_valid = io_req_0_0_valid & (io_req_0_0_bits_vc_sel_0_2 | io_req_0_0_bits_vc_sel_0_3 | io_req_0_0_bits_vc_sel_0_4 | 
io_req_0_0_bits_vc_sel_0_5 | io_req_0_0_bits_vc_sel_0_6 | io_req_0_0_bits_vc_sel_0_7 | io_req_0_0_bits_vc_sel_0_8 | io_req_0_0_bits_vc_sel_0_9); // @[SwitchAllocator.scala:95:{37,65}] wire arbs_1_io_in_0_valid = io_req_0_0_valid & (io_req_0_0_bits_vc_sel_1_2 | io_req_0_0_bits_vc_sel_1_3 | io_req_0_0_bits_vc_sel_1_4 | io_req_0_0_bits_vc_sel_1_5 | io_req_0_0_bits_vc_sel_1_6 | io_req_0_0_bits_vc_sel_1_7 | io_req_0_0_bits_vc_sel_1_8 | io_req_0_0_bits_vc_sel_1_9); // @[SwitchAllocator.scala:95:{37,65}] wire arbs_2_io_in_0_valid = io_req_0_0_valid & (io_req_0_0_bits_vc_sel_2_2 | io_req_0_0_bits_vc_sel_2_3 | io_req_0_0_bits_vc_sel_2_4 | io_req_0_0_bits_vc_sel_2_5 | io_req_0_0_bits_vc_sel_2_6 | io_req_0_0_bits_vc_sel_2_7 | io_req_0_0_bits_vc_sel_2_8 | io_req_0_0_bits_vc_sel_2_9); // @[SwitchAllocator.scala:95:{37,65}] wire arbs_0_io_in_1_valid = io_req_1_0_valid & (io_req_1_0_bits_vc_sel_0_2 | io_req_1_0_bits_vc_sel_0_3 | io_req_1_0_bits_vc_sel_0_4 | io_req_1_0_bits_vc_sel_0_5 | io_req_1_0_bits_vc_sel_0_6 | io_req_1_0_bits_vc_sel_0_7 | io_req_1_0_bits_vc_sel_0_8 | io_req_1_0_bits_vc_sel_0_9); // @[SwitchAllocator.scala:95:{37,65}] wire arbs_1_io_in_1_valid = io_req_1_0_valid & (io_req_1_0_bits_vc_sel_1_2 | io_req_1_0_bits_vc_sel_1_3 | io_req_1_0_bits_vc_sel_1_4 | io_req_1_0_bits_vc_sel_1_5 | io_req_1_0_bits_vc_sel_1_6 | io_req_1_0_bits_vc_sel_1_7 | io_req_1_0_bits_vc_sel_1_8 | io_req_1_0_bits_vc_sel_1_9); // @[SwitchAllocator.scala:95:{37,65}] wire arbs_2_io_in_1_valid = io_req_1_0_valid & (io_req_1_0_bits_vc_sel_2_2 | io_req_1_0_bits_vc_sel_2_3 | io_req_1_0_bits_vc_sel_2_4 | io_req_1_0_bits_vc_sel_2_5 | io_req_1_0_bits_vc_sel_2_6 | io_req_1_0_bits_vc_sel_2_7 | io_req_1_0_bits_vc_sel_2_8 | io_req_1_0_bits_vc_sel_2_9); // @[SwitchAllocator.scala:95:{37,65}] wire arbs_0_io_in_2_valid = io_req_2_0_valid & (io_req_2_0_bits_vc_sel_0_2 | io_req_2_0_bits_vc_sel_0_3 | io_req_2_0_bits_vc_sel_0_4 | io_req_2_0_bits_vc_sel_0_5 | io_req_2_0_bits_vc_sel_0_6 | io_req_2_0_bits_vc_sel_0_7 | io_req_2_0_bits_vc_sel_0_8 | io_req_2_0_bits_vc_sel_0_9); // @[SwitchAllocator.scala:95:{37,65}] wire arbs_1_io_in_2_valid = io_req_2_0_valid & (io_req_2_0_bits_vc_sel_1_2 | io_req_2_0_bits_vc_sel_1_3 | io_req_2_0_bits_vc_sel_1_4 | io_req_2_0_bits_vc_sel_1_5 | io_req_2_0_bits_vc_sel_1_6 | io_req_2_0_bits_vc_sel_1_7 | io_req_2_0_bits_vc_sel_1_8 | io_req_2_0_bits_vc_sel_1_9); // @[SwitchAllocator.scala:95:{37,65}] wire arbs_2_io_in_2_valid = io_req_2_0_valid & (io_req_2_0_bits_vc_sel_2_2 | io_req_2_0_bits_vc_sel_2_3 | io_req_2_0_bits_vc_sel_2_4 | io_req_2_0_bits_vc_sel_2_5 | io_req_2_0_bits_vc_sel_2_6 | io_req_2_0_bits_vc_sel_2_7 | io_req_2_0_bits_vc_sel_2_8 | io_req_2_0_bits_vc_sel_2_9); // @[SwitchAllocator.scala:95:{37,65}] SwitchArbiter_313 arbs_0 ( // @[SwitchAllocator.scala:83:45] .clock (clock), .reset (reset), .io_in_0_ready (_arbs_0_io_in_0_ready), .io_in_0_valid (arbs_0_io_in_0_valid), // @[SwitchAllocator.scala:95:37] .io_in_0_bits_vc_sel_2_2 (io_req_0_0_bits_vc_sel_2_2), .io_in_0_bits_vc_sel_2_3 (io_req_0_0_bits_vc_sel_2_3), .io_in_0_bits_vc_sel_2_4 (io_req_0_0_bits_vc_sel_2_4), .io_in_0_bits_vc_sel_2_5 (io_req_0_0_bits_vc_sel_2_5), .io_in_0_bits_vc_sel_2_6 (io_req_0_0_bits_vc_sel_2_6), .io_in_0_bits_vc_sel_2_7 (io_req_0_0_bits_vc_sel_2_7), .io_in_0_bits_vc_sel_2_8 (io_req_0_0_bits_vc_sel_2_8), .io_in_0_bits_vc_sel_2_9 (io_req_0_0_bits_vc_sel_2_9), .io_in_0_bits_vc_sel_1_2 (io_req_0_0_bits_vc_sel_1_2), .io_in_0_bits_vc_sel_1_3 (io_req_0_0_bits_vc_sel_1_3), .io_in_0_bits_vc_sel_1_4 (io_req_0_0_bits_vc_sel_1_4), 
.io_in_0_bits_vc_sel_1_5 (io_req_0_0_bits_vc_sel_1_5), .io_in_0_bits_vc_sel_1_6 (io_req_0_0_bits_vc_sel_1_6), .io_in_0_bits_vc_sel_1_7 (io_req_0_0_bits_vc_sel_1_7), .io_in_0_bits_vc_sel_1_8 (io_req_0_0_bits_vc_sel_1_8), .io_in_0_bits_vc_sel_1_9 (io_req_0_0_bits_vc_sel_1_9), .io_in_0_bits_vc_sel_0_2 (io_req_0_0_bits_vc_sel_0_2), .io_in_0_bits_vc_sel_0_3 (io_req_0_0_bits_vc_sel_0_3), .io_in_0_bits_vc_sel_0_4 (io_req_0_0_bits_vc_sel_0_4), .io_in_0_bits_vc_sel_0_5 (io_req_0_0_bits_vc_sel_0_5), .io_in_0_bits_vc_sel_0_6 (io_req_0_0_bits_vc_sel_0_6), .io_in_0_bits_vc_sel_0_7 (io_req_0_0_bits_vc_sel_0_7), .io_in_0_bits_vc_sel_0_8 (io_req_0_0_bits_vc_sel_0_8), .io_in_0_bits_vc_sel_0_9 (io_req_0_0_bits_vc_sel_0_9), .io_in_0_bits_tail (io_req_0_0_bits_tail), .io_in_1_ready (_arbs_0_io_in_1_ready), .io_in_1_valid (arbs_0_io_in_1_valid), // @[SwitchAllocator.scala:95:37] .io_in_1_bits_vc_sel_2_2 (io_req_1_0_bits_vc_sel_2_2), .io_in_1_bits_vc_sel_2_3 (io_req_1_0_bits_vc_sel_2_3), .io_in_1_bits_vc_sel_2_4 (io_req_1_0_bits_vc_sel_2_4), .io_in_1_bits_vc_sel_2_5 (io_req_1_0_bits_vc_sel_2_5), .io_in_1_bits_vc_sel_2_6 (io_req_1_0_bits_vc_sel_2_6), .io_in_1_bits_vc_sel_2_7 (io_req_1_0_bits_vc_sel_2_7), .io_in_1_bits_vc_sel_2_8 (io_req_1_0_bits_vc_sel_2_8), .io_in_1_bits_vc_sel_2_9 (io_req_1_0_bits_vc_sel_2_9), .io_in_1_bits_vc_sel_1_2 (io_req_1_0_bits_vc_sel_1_2), .io_in_1_bits_vc_sel_1_3 (io_req_1_0_bits_vc_sel_1_3), .io_in_1_bits_vc_sel_1_4 (io_req_1_0_bits_vc_sel_1_4), .io_in_1_bits_vc_sel_1_5 (io_req_1_0_bits_vc_sel_1_5), .io_in_1_bits_vc_sel_1_6 (io_req_1_0_bits_vc_sel_1_6), .io_in_1_bits_vc_sel_1_7 (io_req_1_0_bits_vc_sel_1_7), .io_in_1_bits_vc_sel_1_8 (io_req_1_0_bits_vc_sel_1_8), .io_in_1_bits_vc_sel_1_9 (io_req_1_0_bits_vc_sel_1_9), .io_in_1_bits_vc_sel_0_2 (io_req_1_0_bits_vc_sel_0_2), .io_in_1_bits_vc_sel_0_3 (io_req_1_0_bits_vc_sel_0_3), .io_in_1_bits_vc_sel_0_4 (io_req_1_0_bits_vc_sel_0_4), .io_in_1_bits_vc_sel_0_5 (io_req_1_0_bits_vc_sel_0_5), .io_in_1_bits_vc_sel_0_6 (io_req_1_0_bits_vc_sel_0_6), .io_in_1_bits_vc_sel_0_7 (io_req_1_0_bits_vc_sel_0_7), .io_in_1_bits_vc_sel_0_8 (io_req_1_0_bits_vc_sel_0_8), .io_in_1_bits_vc_sel_0_9 (io_req_1_0_bits_vc_sel_0_9), .io_in_1_bits_tail (io_req_1_0_bits_tail), .io_in_2_ready (_arbs_0_io_in_2_ready), .io_in_2_valid (arbs_0_io_in_2_valid), // @[SwitchAllocator.scala:95:37] .io_in_2_bits_vc_sel_2_2 (io_req_2_0_bits_vc_sel_2_2), .io_in_2_bits_vc_sel_2_3 (io_req_2_0_bits_vc_sel_2_3), .io_in_2_bits_vc_sel_2_4 (io_req_2_0_bits_vc_sel_2_4), .io_in_2_bits_vc_sel_2_5 (io_req_2_0_bits_vc_sel_2_5), .io_in_2_bits_vc_sel_2_6 (io_req_2_0_bits_vc_sel_2_6), .io_in_2_bits_vc_sel_2_7 (io_req_2_0_bits_vc_sel_2_7), .io_in_2_bits_vc_sel_2_8 (io_req_2_0_bits_vc_sel_2_8), .io_in_2_bits_vc_sel_2_9 (io_req_2_0_bits_vc_sel_2_9), .io_in_2_bits_vc_sel_1_2 (io_req_2_0_bits_vc_sel_1_2), .io_in_2_bits_vc_sel_1_3 (io_req_2_0_bits_vc_sel_1_3), .io_in_2_bits_vc_sel_1_4 (io_req_2_0_bits_vc_sel_1_4), .io_in_2_bits_vc_sel_1_5 (io_req_2_0_bits_vc_sel_1_5), .io_in_2_bits_vc_sel_1_6 (io_req_2_0_bits_vc_sel_1_6), .io_in_2_bits_vc_sel_1_7 (io_req_2_0_bits_vc_sel_1_7), .io_in_2_bits_vc_sel_1_8 (io_req_2_0_bits_vc_sel_1_8), .io_in_2_bits_vc_sel_1_9 (io_req_2_0_bits_vc_sel_1_9), .io_in_2_bits_vc_sel_0_2 (io_req_2_0_bits_vc_sel_0_2), .io_in_2_bits_vc_sel_0_3 (io_req_2_0_bits_vc_sel_0_3), .io_in_2_bits_vc_sel_0_4 (io_req_2_0_bits_vc_sel_0_4), .io_in_2_bits_vc_sel_0_5 (io_req_2_0_bits_vc_sel_0_5), .io_in_2_bits_vc_sel_0_6 (io_req_2_0_bits_vc_sel_0_6), .io_in_2_bits_vc_sel_0_7 
(io_req_2_0_bits_vc_sel_0_7), .io_in_2_bits_vc_sel_0_8 (io_req_2_0_bits_vc_sel_0_8), .io_in_2_bits_vc_sel_0_9 (io_req_2_0_bits_vc_sel_0_9), .io_in_2_bits_tail (io_req_2_0_bits_tail), .io_out_0_valid (_arbs_0_io_out_0_valid), .io_out_0_bits_vc_sel_2_2 (/* unused */), .io_out_0_bits_vc_sel_2_3 (/* unused */), .io_out_0_bits_vc_sel_2_4 (/* unused */), .io_out_0_bits_vc_sel_2_5 (/* unused */), .io_out_0_bits_vc_sel_2_6 (/* unused */), .io_out_0_bits_vc_sel_2_7 (/* unused */), .io_out_0_bits_vc_sel_2_8 (/* unused */), .io_out_0_bits_vc_sel_2_9 (/* unused */), .io_out_0_bits_vc_sel_1_2 (/* unused */), .io_out_0_bits_vc_sel_1_3 (/* unused */), .io_out_0_bits_vc_sel_1_4 (/* unused */), .io_out_0_bits_vc_sel_1_5 (/* unused */), .io_out_0_bits_vc_sel_1_6 (/* unused */), .io_out_0_bits_vc_sel_1_7 (/* unused */), .io_out_0_bits_vc_sel_1_8 (/* unused */), .io_out_0_bits_vc_sel_1_9 (/* unused */), .io_out_0_bits_vc_sel_0_2 (_arbs_0_io_out_0_bits_vc_sel_0_2), .io_out_0_bits_vc_sel_0_3 (_arbs_0_io_out_0_bits_vc_sel_0_3), .io_out_0_bits_vc_sel_0_4 (_arbs_0_io_out_0_bits_vc_sel_0_4), .io_out_0_bits_vc_sel_0_5 (_arbs_0_io_out_0_bits_vc_sel_0_5), .io_out_0_bits_vc_sel_0_6 (_arbs_0_io_out_0_bits_vc_sel_0_6), .io_out_0_bits_vc_sel_0_7 (_arbs_0_io_out_0_bits_vc_sel_0_7), .io_out_0_bits_vc_sel_0_8 (_arbs_0_io_out_0_bits_vc_sel_0_8), .io_out_0_bits_vc_sel_0_9 (_arbs_0_io_out_0_bits_vc_sel_0_9), .io_chosen_oh_0 (_arbs_0_io_chosen_oh_0) ); // @[SwitchAllocator.scala:83:45] SwitchArbiter_313 arbs_1 ( // @[SwitchAllocator.scala:83:45] .clock (clock), .reset (reset), .io_in_0_ready (_arbs_1_io_in_0_ready), .io_in_0_valid (arbs_1_io_in_0_valid), // @[SwitchAllocator.scala:95:37] .io_in_0_bits_vc_sel_2_2 (io_req_0_0_bits_vc_sel_2_2), .io_in_0_bits_vc_sel_2_3 (io_req_0_0_bits_vc_sel_2_3), .io_in_0_bits_vc_sel_2_4 (io_req_0_0_bits_vc_sel_2_4), .io_in_0_bits_vc_sel_2_5 (io_req_0_0_bits_vc_sel_2_5), .io_in_0_bits_vc_sel_2_6 (io_req_0_0_bits_vc_sel_2_6), .io_in_0_bits_vc_sel_2_7 (io_req_0_0_bits_vc_sel_2_7), .io_in_0_bits_vc_sel_2_8 (io_req_0_0_bits_vc_sel_2_8), .io_in_0_bits_vc_sel_2_9 (io_req_0_0_bits_vc_sel_2_9), .io_in_0_bits_vc_sel_1_2 (io_req_0_0_bits_vc_sel_1_2), .io_in_0_bits_vc_sel_1_3 (io_req_0_0_bits_vc_sel_1_3), .io_in_0_bits_vc_sel_1_4 (io_req_0_0_bits_vc_sel_1_4), .io_in_0_bits_vc_sel_1_5 (io_req_0_0_bits_vc_sel_1_5), .io_in_0_bits_vc_sel_1_6 (io_req_0_0_bits_vc_sel_1_6), .io_in_0_bits_vc_sel_1_7 (io_req_0_0_bits_vc_sel_1_7), .io_in_0_bits_vc_sel_1_8 (io_req_0_0_bits_vc_sel_1_8), .io_in_0_bits_vc_sel_1_9 (io_req_0_0_bits_vc_sel_1_9), .io_in_0_bits_vc_sel_0_2 (io_req_0_0_bits_vc_sel_0_2), .io_in_0_bits_vc_sel_0_3 (io_req_0_0_bits_vc_sel_0_3), .io_in_0_bits_vc_sel_0_4 (io_req_0_0_bits_vc_sel_0_4), .io_in_0_bits_vc_sel_0_5 (io_req_0_0_bits_vc_sel_0_5), .io_in_0_bits_vc_sel_0_6 (io_req_0_0_bits_vc_sel_0_6), .io_in_0_bits_vc_sel_0_7 (io_req_0_0_bits_vc_sel_0_7), .io_in_0_bits_vc_sel_0_8 (io_req_0_0_bits_vc_sel_0_8), .io_in_0_bits_vc_sel_0_9 (io_req_0_0_bits_vc_sel_0_9), .io_in_0_bits_tail (io_req_0_0_bits_tail), .io_in_1_ready (_arbs_1_io_in_1_ready), .io_in_1_valid (arbs_1_io_in_1_valid), // @[SwitchAllocator.scala:95:37] .io_in_1_bits_vc_sel_2_2 (io_req_1_0_bits_vc_sel_2_2), .io_in_1_bits_vc_sel_2_3 (io_req_1_0_bits_vc_sel_2_3), .io_in_1_bits_vc_sel_2_4 (io_req_1_0_bits_vc_sel_2_4), .io_in_1_bits_vc_sel_2_5 (io_req_1_0_bits_vc_sel_2_5), .io_in_1_bits_vc_sel_2_6 (io_req_1_0_bits_vc_sel_2_6), .io_in_1_bits_vc_sel_2_7 (io_req_1_0_bits_vc_sel_2_7), .io_in_1_bits_vc_sel_2_8 (io_req_1_0_bits_vc_sel_2_8), 
.io_in_1_bits_vc_sel_2_9 (io_req_1_0_bits_vc_sel_2_9), .io_in_1_bits_vc_sel_1_2 (io_req_1_0_bits_vc_sel_1_2), .io_in_1_bits_vc_sel_1_3 (io_req_1_0_bits_vc_sel_1_3), .io_in_1_bits_vc_sel_1_4 (io_req_1_0_bits_vc_sel_1_4), .io_in_1_bits_vc_sel_1_5 (io_req_1_0_bits_vc_sel_1_5), .io_in_1_bits_vc_sel_1_6 (io_req_1_0_bits_vc_sel_1_6), .io_in_1_bits_vc_sel_1_7 (io_req_1_0_bits_vc_sel_1_7), .io_in_1_bits_vc_sel_1_8 (io_req_1_0_bits_vc_sel_1_8), .io_in_1_bits_vc_sel_1_9 (io_req_1_0_bits_vc_sel_1_9), .io_in_1_bits_vc_sel_0_2 (io_req_1_0_bits_vc_sel_0_2), .io_in_1_bits_vc_sel_0_3 (io_req_1_0_bits_vc_sel_0_3), .io_in_1_bits_vc_sel_0_4 (io_req_1_0_bits_vc_sel_0_4), .io_in_1_bits_vc_sel_0_5 (io_req_1_0_bits_vc_sel_0_5), .io_in_1_bits_vc_sel_0_6 (io_req_1_0_bits_vc_sel_0_6), .io_in_1_bits_vc_sel_0_7 (io_req_1_0_bits_vc_sel_0_7), .io_in_1_bits_vc_sel_0_8 (io_req_1_0_bits_vc_sel_0_8), .io_in_1_bits_vc_sel_0_9 (io_req_1_0_bits_vc_sel_0_9), .io_in_1_bits_tail (io_req_1_0_bits_tail), .io_in_2_ready (_arbs_1_io_in_2_ready), .io_in_2_valid (arbs_1_io_in_2_valid), // @[SwitchAllocator.scala:95:37] .io_in_2_bits_vc_sel_2_2 (io_req_2_0_bits_vc_sel_2_2), .io_in_2_bits_vc_sel_2_3 (io_req_2_0_bits_vc_sel_2_3), .io_in_2_bits_vc_sel_2_4 (io_req_2_0_bits_vc_sel_2_4), .io_in_2_bits_vc_sel_2_5 (io_req_2_0_bits_vc_sel_2_5), .io_in_2_bits_vc_sel_2_6 (io_req_2_0_bits_vc_sel_2_6), .io_in_2_bits_vc_sel_2_7 (io_req_2_0_bits_vc_sel_2_7), .io_in_2_bits_vc_sel_2_8 (io_req_2_0_bits_vc_sel_2_8), .io_in_2_bits_vc_sel_2_9 (io_req_2_0_bits_vc_sel_2_9), .io_in_2_bits_vc_sel_1_2 (io_req_2_0_bits_vc_sel_1_2), .io_in_2_bits_vc_sel_1_3 (io_req_2_0_bits_vc_sel_1_3), .io_in_2_bits_vc_sel_1_4 (io_req_2_0_bits_vc_sel_1_4), .io_in_2_bits_vc_sel_1_5 (io_req_2_0_bits_vc_sel_1_5), .io_in_2_bits_vc_sel_1_6 (io_req_2_0_bits_vc_sel_1_6), .io_in_2_bits_vc_sel_1_7 (io_req_2_0_bits_vc_sel_1_7), .io_in_2_bits_vc_sel_1_8 (io_req_2_0_bits_vc_sel_1_8), .io_in_2_bits_vc_sel_1_9 (io_req_2_0_bits_vc_sel_1_9), .io_in_2_bits_vc_sel_0_2 (io_req_2_0_bits_vc_sel_0_2), .io_in_2_bits_vc_sel_0_3 (io_req_2_0_bits_vc_sel_0_3), .io_in_2_bits_vc_sel_0_4 (io_req_2_0_bits_vc_sel_0_4), .io_in_2_bits_vc_sel_0_5 (io_req_2_0_bits_vc_sel_0_5), .io_in_2_bits_vc_sel_0_6 (io_req_2_0_bits_vc_sel_0_6), .io_in_2_bits_vc_sel_0_7 (io_req_2_0_bits_vc_sel_0_7), .io_in_2_bits_vc_sel_0_8 (io_req_2_0_bits_vc_sel_0_8), .io_in_2_bits_vc_sel_0_9 (io_req_2_0_bits_vc_sel_0_9), .io_in_2_bits_tail (io_req_2_0_bits_tail), .io_out_0_valid (_arbs_1_io_out_0_valid), .io_out_0_bits_vc_sel_2_2 (/* unused */), .io_out_0_bits_vc_sel_2_3 (/* unused */), .io_out_0_bits_vc_sel_2_4 (/* unused */), .io_out_0_bits_vc_sel_2_5 (/* unused */), .io_out_0_bits_vc_sel_2_6 (/* unused */), .io_out_0_bits_vc_sel_2_7 (/* unused */), .io_out_0_bits_vc_sel_2_8 (/* unused */), .io_out_0_bits_vc_sel_2_9 (/* unused */), .io_out_0_bits_vc_sel_1_2 (_arbs_1_io_out_0_bits_vc_sel_1_2), .io_out_0_bits_vc_sel_1_3 (_arbs_1_io_out_0_bits_vc_sel_1_3), .io_out_0_bits_vc_sel_1_4 (_arbs_1_io_out_0_bits_vc_sel_1_4), .io_out_0_bits_vc_sel_1_5 (_arbs_1_io_out_0_bits_vc_sel_1_5), .io_out_0_bits_vc_sel_1_6 (_arbs_1_io_out_0_bits_vc_sel_1_6), .io_out_0_bits_vc_sel_1_7 (_arbs_1_io_out_0_bits_vc_sel_1_7), .io_out_0_bits_vc_sel_1_8 (_arbs_1_io_out_0_bits_vc_sel_1_8), .io_out_0_bits_vc_sel_1_9 (_arbs_1_io_out_0_bits_vc_sel_1_9), .io_out_0_bits_vc_sel_0_2 (/* unused */), .io_out_0_bits_vc_sel_0_3 (/* unused */), .io_out_0_bits_vc_sel_0_4 (/* unused */), .io_out_0_bits_vc_sel_0_5 (/* unused */), .io_out_0_bits_vc_sel_0_6 (/* unused */), 
.io_out_0_bits_vc_sel_0_7 (/* unused */), .io_out_0_bits_vc_sel_0_8 (/* unused */), .io_out_0_bits_vc_sel_0_9 (/* unused */), .io_chosen_oh_0 (_arbs_1_io_chosen_oh_0) ); // @[SwitchAllocator.scala:83:45] SwitchArbiter_313 arbs_2 ( // @[SwitchAllocator.scala:83:45] .clock (clock), .reset (reset), .io_in_0_ready (_arbs_2_io_in_0_ready), .io_in_0_valid (arbs_2_io_in_0_valid), // @[SwitchAllocator.scala:95:37] .io_in_0_bits_vc_sel_2_2 (io_req_0_0_bits_vc_sel_2_2), .io_in_0_bits_vc_sel_2_3 (io_req_0_0_bits_vc_sel_2_3), .io_in_0_bits_vc_sel_2_4 (io_req_0_0_bits_vc_sel_2_4), .io_in_0_bits_vc_sel_2_5 (io_req_0_0_bits_vc_sel_2_5), .io_in_0_bits_vc_sel_2_6 (io_req_0_0_bits_vc_sel_2_6), .io_in_0_bits_vc_sel_2_7 (io_req_0_0_bits_vc_sel_2_7), .io_in_0_bits_vc_sel_2_8 (io_req_0_0_bits_vc_sel_2_8), .io_in_0_bits_vc_sel_2_9 (io_req_0_0_bits_vc_sel_2_9), .io_in_0_bits_vc_sel_1_2 (io_req_0_0_bits_vc_sel_1_2), .io_in_0_bits_vc_sel_1_3 (io_req_0_0_bits_vc_sel_1_3), .io_in_0_bits_vc_sel_1_4 (io_req_0_0_bits_vc_sel_1_4), .io_in_0_bits_vc_sel_1_5 (io_req_0_0_bits_vc_sel_1_5), .io_in_0_bits_vc_sel_1_6 (io_req_0_0_bits_vc_sel_1_6), .io_in_0_bits_vc_sel_1_7 (io_req_0_0_bits_vc_sel_1_7), .io_in_0_bits_vc_sel_1_8 (io_req_0_0_bits_vc_sel_1_8), .io_in_0_bits_vc_sel_1_9 (io_req_0_0_bits_vc_sel_1_9), .io_in_0_bits_vc_sel_0_2 (io_req_0_0_bits_vc_sel_0_2), .io_in_0_bits_vc_sel_0_3 (io_req_0_0_bits_vc_sel_0_3), .io_in_0_bits_vc_sel_0_4 (io_req_0_0_bits_vc_sel_0_4), .io_in_0_bits_vc_sel_0_5 (io_req_0_0_bits_vc_sel_0_5), .io_in_0_bits_vc_sel_0_6 (io_req_0_0_bits_vc_sel_0_6), .io_in_0_bits_vc_sel_0_7 (io_req_0_0_bits_vc_sel_0_7), .io_in_0_bits_vc_sel_0_8 (io_req_0_0_bits_vc_sel_0_8), .io_in_0_bits_vc_sel_0_9 (io_req_0_0_bits_vc_sel_0_9), .io_in_0_bits_tail (io_req_0_0_bits_tail), .io_in_1_ready (_arbs_2_io_in_1_ready), .io_in_1_valid (arbs_2_io_in_1_valid), // @[SwitchAllocator.scala:95:37] .io_in_1_bits_vc_sel_2_2 (io_req_1_0_bits_vc_sel_2_2), .io_in_1_bits_vc_sel_2_3 (io_req_1_0_bits_vc_sel_2_3), .io_in_1_bits_vc_sel_2_4 (io_req_1_0_bits_vc_sel_2_4), .io_in_1_bits_vc_sel_2_5 (io_req_1_0_bits_vc_sel_2_5), .io_in_1_bits_vc_sel_2_6 (io_req_1_0_bits_vc_sel_2_6), .io_in_1_bits_vc_sel_2_7 (io_req_1_0_bits_vc_sel_2_7), .io_in_1_bits_vc_sel_2_8 (io_req_1_0_bits_vc_sel_2_8), .io_in_1_bits_vc_sel_2_9 (io_req_1_0_bits_vc_sel_2_9), .io_in_1_bits_vc_sel_1_2 (io_req_1_0_bits_vc_sel_1_2), .io_in_1_bits_vc_sel_1_3 (io_req_1_0_bits_vc_sel_1_3), .io_in_1_bits_vc_sel_1_4 (io_req_1_0_bits_vc_sel_1_4), .io_in_1_bits_vc_sel_1_5 (io_req_1_0_bits_vc_sel_1_5), .io_in_1_bits_vc_sel_1_6 (io_req_1_0_bits_vc_sel_1_6), .io_in_1_bits_vc_sel_1_7 (io_req_1_0_bits_vc_sel_1_7), .io_in_1_bits_vc_sel_1_8 (io_req_1_0_bits_vc_sel_1_8), .io_in_1_bits_vc_sel_1_9 (io_req_1_0_bits_vc_sel_1_9), .io_in_1_bits_vc_sel_0_2 (io_req_1_0_bits_vc_sel_0_2), .io_in_1_bits_vc_sel_0_3 (io_req_1_0_bits_vc_sel_0_3), .io_in_1_bits_vc_sel_0_4 (io_req_1_0_bits_vc_sel_0_4), .io_in_1_bits_vc_sel_0_5 (io_req_1_0_bits_vc_sel_0_5), .io_in_1_bits_vc_sel_0_6 (io_req_1_0_bits_vc_sel_0_6), .io_in_1_bits_vc_sel_0_7 (io_req_1_0_bits_vc_sel_0_7), .io_in_1_bits_vc_sel_0_8 (io_req_1_0_bits_vc_sel_0_8), .io_in_1_bits_vc_sel_0_9 (io_req_1_0_bits_vc_sel_0_9), .io_in_1_bits_tail (io_req_1_0_bits_tail), .io_in_2_ready (_arbs_2_io_in_2_ready), .io_in_2_valid (arbs_2_io_in_2_valid), // @[SwitchAllocator.scala:95:37] .io_in_2_bits_vc_sel_2_2 (io_req_2_0_bits_vc_sel_2_2), .io_in_2_bits_vc_sel_2_3 (io_req_2_0_bits_vc_sel_2_3), .io_in_2_bits_vc_sel_2_4 (io_req_2_0_bits_vc_sel_2_4), .io_in_2_bits_vc_sel_2_5 
(io_req_2_0_bits_vc_sel_2_5), .io_in_2_bits_vc_sel_2_6 (io_req_2_0_bits_vc_sel_2_6), .io_in_2_bits_vc_sel_2_7 (io_req_2_0_bits_vc_sel_2_7), .io_in_2_bits_vc_sel_2_8 (io_req_2_0_bits_vc_sel_2_8), .io_in_2_bits_vc_sel_2_9 (io_req_2_0_bits_vc_sel_2_9), .io_in_2_bits_vc_sel_1_2 (io_req_2_0_bits_vc_sel_1_2), .io_in_2_bits_vc_sel_1_3 (io_req_2_0_bits_vc_sel_1_3), .io_in_2_bits_vc_sel_1_4 (io_req_2_0_bits_vc_sel_1_4), .io_in_2_bits_vc_sel_1_5 (io_req_2_0_bits_vc_sel_1_5), .io_in_2_bits_vc_sel_1_6 (io_req_2_0_bits_vc_sel_1_6), .io_in_2_bits_vc_sel_1_7 (io_req_2_0_bits_vc_sel_1_7), .io_in_2_bits_vc_sel_1_8 (io_req_2_0_bits_vc_sel_1_8), .io_in_2_bits_vc_sel_1_9 (io_req_2_0_bits_vc_sel_1_9), .io_in_2_bits_vc_sel_0_2 (io_req_2_0_bits_vc_sel_0_2), .io_in_2_bits_vc_sel_0_3 (io_req_2_0_bits_vc_sel_0_3), .io_in_2_bits_vc_sel_0_4 (io_req_2_0_bits_vc_sel_0_4), .io_in_2_bits_vc_sel_0_5 (io_req_2_0_bits_vc_sel_0_5), .io_in_2_bits_vc_sel_0_6 (io_req_2_0_bits_vc_sel_0_6), .io_in_2_bits_vc_sel_0_7 (io_req_2_0_bits_vc_sel_0_7), .io_in_2_bits_vc_sel_0_8 (io_req_2_0_bits_vc_sel_0_8), .io_in_2_bits_vc_sel_0_9 (io_req_2_0_bits_vc_sel_0_9), .io_in_2_bits_tail (io_req_2_0_bits_tail), .io_out_0_valid (_arbs_2_io_out_0_valid), .io_out_0_bits_vc_sel_2_2 (_arbs_2_io_out_0_bits_vc_sel_2_2), .io_out_0_bits_vc_sel_2_3 (_arbs_2_io_out_0_bits_vc_sel_2_3), .io_out_0_bits_vc_sel_2_4 (_arbs_2_io_out_0_bits_vc_sel_2_4), .io_out_0_bits_vc_sel_2_5 (_arbs_2_io_out_0_bits_vc_sel_2_5), .io_out_0_bits_vc_sel_2_6 (_arbs_2_io_out_0_bits_vc_sel_2_6), .io_out_0_bits_vc_sel_2_7 (_arbs_2_io_out_0_bits_vc_sel_2_7), .io_out_0_bits_vc_sel_2_8 (_arbs_2_io_out_0_bits_vc_sel_2_8), .io_out_0_bits_vc_sel_2_9 (_arbs_2_io_out_0_bits_vc_sel_2_9), .io_out_0_bits_vc_sel_1_2 (/* unused */), .io_out_0_bits_vc_sel_1_3 (/* unused */), .io_out_0_bits_vc_sel_1_4 (/* unused */), .io_out_0_bits_vc_sel_1_5 (/* unused */), .io_out_0_bits_vc_sel_1_6 (/* unused */), .io_out_0_bits_vc_sel_1_7 (/* unused */), .io_out_0_bits_vc_sel_1_8 (/* unused */), .io_out_0_bits_vc_sel_1_9 (/* unused */), .io_out_0_bits_vc_sel_0_2 (/* unused */), .io_out_0_bits_vc_sel_0_3 (/* unused */), .io_out_0_bits_vc_sel_0_4 (/* unused */), .io_out_0_bits_vc_sel_0_5 (/* unused */), .io_out_0_bits_vc_sel_0_6 (/* unused */), .io_out_0_bits_vc_sel_0_7 (/* unused */), .io_out_0_bits_vc_sel_0_8 (/* unused */), .io_out_0_bits_vc_sel_0_9 (/* unused */), .io_chosen_oh_0 (_arbs_2_io_chosen_oh_0) ); // @[SwitchAllocator.scala:83:45] assign io_req_2_0_ready = _arbs_0_io_in_2_ready & arbs_0_io_in_2_valid | _arbs_1_io_in_2_ready & arbs_1_io_in_2_valid | _arbs_2_io_in_2_ready & arbs_2_io_in_2_valid; // @[Decoupled.scala:51:35] assign io_req_1_0_ready = _arbs_0_io_in_1_ready & arbs_0_io_in_1_valid | _arbs_1_io_in_1_ready & arbs_1_io_in_1_valid | _arbs_2_io_in_1_ready & arbs_2_io_in_1_valid; // @[Decoupled.scala:51:35] assign io_req_0_0_ready = _arbs_0_io_in_0_ready & arbs_0_io_in_0_valid | _arbs_1_io_in_0_ready & arbs_1_io_in_0_valid | _arbs_2_io_in_0_ready & arbs_2_io_in_0_valid; // @[Decoupled.scala:51:35] assign io_credit_alloc_2_2_alloc = _arbs_2_io_out_0_valid & _arbs_2_io_out_0_bits_vc_sel_2_2; // @[SwitchAllocator.scala:64:7, :83:45, :120:33] assign io_credit_alloc_2_3_alloc = _arbs_2_io_out_0_valid & _arbs_2_io_out_0_bits_vc_sel_2_3; // @[SwitchAllocator.scala:64:7, :83:45, :120:33] assign io_credit_alloc_2_4_alloc = _arbs_2_io_out_0_valid & _arbs_2_io_out_0_bits_vc_sel_2_4; // @[SwitchAllocator.scala:64:7, :83:45, :120:33] assign io_credit_alloc_2_5_alloc = _arbs_2_io_out_0_valid & 
_arbs_2_io_out_0_bits_vc_sel_2_5; // @[SwitchAllocator.scala:64:7, :83:45, :120:33] assign io_credit_alloc_2_6_alloc = _arbs_2_io_out_0_valid & _arbs_2_io_out_0_bits_vc_sel_2_6; // @[SwitchAllocator.scala:64:7, :83:45, :120:33] assign io_credit_alloc_2_7_alloc = _arbs_2_io_out_0_valid & _arbs_2_io_out_0_bits_vc_sel_2_7; // @[SwitchAllocator.scala:64:7, :83:45, :120:33] assign io_credit_alloc_2_8_alloc = _arbs_2_io_out_0_valid & _arbs_2_io_out_0_bits_vc_sel_2_8; // @[SwitchAllocator.scala:64:7, :83:45, :120:33] assign io_credit_alloc_2_9_alloc = _arbs_2_io_out_0_valid & _arbs_2_io_out_0_bits_vc_sel_2_9; // @[SwitchAllocator.scala:64:7, :83:45, :120:33] assign io_credit_alloc_1_2_alloc = _arbs_1_io_out_0_valid & _arbs_1_io_out_0_bits_vc_sel_1_2; // @[SwitchAllocator.scala:64:7, :83:45, :120:33] assign io_credit_alloc_1_3_alloc = _arbs_1_io_out_0_valid & _arbs_1_io_out_0_bits_vc_sel_1_3; // @[SwitchAllocator.scala:64:7, :83:45, :120:33] assign io_credit_alloc_1_4_alloc = _arbs_1_io_out_0_valid & _arbs_1_io_out_0_bits_vc_sel_1_4; // @[SwitchAllocator.scala:64:7, :83:45, :120:33] assign io_credit_alloc_1_5_alloc = _arbs_1_io_out_0_valid & _arbs_1_io_out_0_bits_vc_sel_1_5; // @[SwitchAllocator.scala:64:7, :83:45, :120:33] assign io_credit_alloc_1_6_alloc = _arbs_1_io_out_0_valid & _arbs_1_io_out_0_bits_vc_sel_1_6; // @[SwitchAllocator.scala:64:7, :83:45, :120:33] assign io_credit_alloc_1_7_alloc = _arbs_1_io_out_0_valid & _arbs_1_io_out_0_bits_vc_sel_1_7; // @[SwitchAllocator.scala:64:7, :83:45, :120:33] assign io_credit_alloc_1_8_alloc = _arbs_1_io_out_0_valid & _arbs_1_io_out_0_bits_vc_sel_1_8; // @[SwitchAllocator.scala:64:7, :83:45, :120:33] assign io_credit_alloc_1_9_alloc = _arbs_1_io_out_0_valid & _arbs_1_io_out_0_bits_vc_sel_1_9; // @[SwitchAllocator.scala:64:7, :83:45, :120:33] assign io_credit_alloc_0_2_alloc = _arbs_0_io_out_0_valid & _arbs_0_io_out_0_bits_vc_sel_0_2; // @[SwitchAllocator.scala:64:7, :83:45, :120:33] assign io_credit_alloc_0_3_alloc = _arbs_0_io_out_0_valid & _arbs_0_io_out_0_bits_vc_sel_0_3; // @[SwitchAllocator.scala:64:7, :83:45, :120:33] assign io_credit_alloc_0_4_alloc = _arbs_0_io_out_0_valid & _arbs_0_io_out_0_bits_vc_sel_0_4; // @[SwitchAllocator.scala:64:7, :83:45, :120:33] assign io_credit_alloc_0_5_alloc = _arbs_0_io_out_0_valid & _arbs_0_io_out_0_bits_vc_sel_0_5; // @[SwitchAllocator.scala:64:7, :83:45, :120:33] assign io_credit_alloc_0_6_alloc = _arbs_0_io_out_0_valid & _arbs_0_io_out_0_bits_vc_sel_0_6; // @[SwitchAllocator.scala:64:7, :83:45, :120:33] assign io_credit_alloc_0_7_alloc = _arbs_0_io_out_0_valid & _arbs_0_io_out_0_bits_vc_sel_0_7; // @[SwitchAllocator.scala:64:7, :83:45, :120:33] assign io_credit_alloc_0_8_alloc = _arbs_0_io_out_0_valid & _arbs_0_io_out_0_bits_vc_sel_0_8; // @[SwitchAllocator.scala:64:7, :83:45, :120:33] assign io_credit_alloc_0_9_alloc = _arbs_0_io_out_0_valid & _arbs_0_io_out_0_bits_vc_sel_0_9; // @[SwitchAllocator.scala:64:7, :83:45, :120:33] assign io_switch_sel_2_0_2_0 = arbs_2_io_in_2_valid & _arbs_2_io_chosen_oh_0[2] & _arbs_2_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}] assign io_switch_sel_2_0_1_0 = arbs_2_io_in_1_valid & _arbs_2_io_chosen_oh_0[1] & _arbs_2_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}] assign io_switch_sel_2_0_0_0 = arbs_2_io_in_0_valid & _arbs_2_io_chosen_oh_0[0] & _arbs_2_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}] assign io_switch_sel_1_0_2_0 = arbs_1_io_in_2_valid & 
_arbs_1_io_chosen_oh_0[2] & _arbs_1_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}] assign io_switch_sel_1_0_1_0 = arbs_1_io_in_1_valid & _arbs_1_io_chosen_oh_0[1] & _arbs_1_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}] assign io_switch_sel_1_0_0_0 = arbs_1_io_in_0_valid & _arbs_1_io_chosen_oh_0[0] & _arbs_1_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}] assign io_switch_sel_0_0_2_0 = arbs_0_io_in_2_valid & _arbs_0_io_chosen_oh_0[2] & _arbs_0_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}] assign io_switch_sel_0_0_1_0 = arbs_0_io_in_1_valid & _arbs_0_io_chosen_oh_0[1] & _arbs_0_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}] assign io_switch_sel_0_0_0_0 = arbs_0_io_in_0_valid & _arbs_0_io_chosen_oh_0[0] & _arbs_0_io_out_0_valid; // @[SwitchAllocator.scala:64:7, :83:45, :95:37, :108:{65,91,97}] endmodule
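The tail of the module above is pure combinational glue: each io_credit_alloc_<out>_<vc>_alloc asserts when that output's arbiter granted (io_out_0_valid) and the winning request selected that virtual channel, and each io_switch_sel_<out>_0_<in>_0 asserts when the corresponding input was valid, was the arbiter's one-hot choice, and the grant fired. The Chisel sketch below captures just that reduction; the module name, port shapes, and parameter names are illustrative stand-ins, not the actual SwitchAllocator source.

import chisel3._

class SwitchAllocOutputGlue(nInputs: Int, nOutputs: Int, nVCs: Int) extends Module {
  val io = IO(new Bundle {
    val outValid    = Input(Vec(nOutputs, Bool()))                // per-output arbiter granted
    val vcSel       = Input(Vec(nOutputs, Vec(nVCs, Bool())))     // VC select of the winning request
    val inValid     = Input(Vec(nOutputs, Vec(nInputs, Bool())))  // request valids seen by that arbiter
    val chosenOH    = Input(Vec(nOutputs, Vec(nInputs, Bool())))  // arbiter's one-hot winner
    val creditAlloc = Output(Vec(nOutputs, Vec(nVCs, Bool())))
    val switchSel   = Output(Vec(nOutputs, Vec(nInputs, Bool())))
  })
  for (o <- 0 until nOutputs; v <- 0 until nVCs)
    io.creditAlloc(o)(v) := io.outValid(o) && io.vcSel(o)(v)                      // mirrors io_credit_alloc_*
  for (o <- 0 until nOutputs; i <- 0 until nInputs)
    io.switchSel(o)(i) := io.inValid(o)(i) && io.chosenOH(o)(i) && io.outValid(o) // mirrors io_switch_sel_*
}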
Generate the Verilog code corresponding to the following Chisel files. File ListBuffer.scala: /* * Copyright 2019 SiFive, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You should have received a copy of LICENSE.Apache2 along with * this software. If not, you may obtain a copy at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package sifive.blocks.inclusivecache import chisel3._ import chisel3.util._ import freechips.rocketchip.util._ case class ListBufferParameters[T <: Data](gen: T, queues: Int, entries: Int, bypass: Boolean) { val queueBits = log2Up(queues) val entryBits = log2Up(entries) } class ListBufferPush[T <: Data](params: ListBufferParameters[T]) extends Bundle { val index = UInt(params.queueBits.W) val data = Output(params.gen) } class ListBuffer[T <: Data](params: ListBufferParameters[T]) extends Module { override def desiredName = s"ListBuffer_${params.gen.typeName}_q${params.queues}_e${params.entries}" val io = IO(new Bundle { // push is visible on the same cycle; flow queues val push = Flipped(Decoupled(new ListBufferPush(params))) val valid = UInt(params.queues.W) val pop = Flipped(Valid(UInt(params.queueBits.W))) val data = Output(params.gen) }) val valid = RegInit(0.U(params.queues.W)) val head = Mem(params.queues, UInt(params.entryBits.W)) val tail = Mem(params.queues, UInt(params.entryBits.W)) val used = RegInit(0.U(params.entries.W)) val next = Mem(params.entries, UInt(params.entryBits.W)) val data = Mem(params.entries, params.gen) val freeOH = ~(leftOR(~used) << 1) & ~used val freeIdx = OHToUInt(freeOH) val valid_set = WireDefault(0.U(params.queues.W)) val valid_clr = WireDefault(0.U(params.queues.W)) val used_set = WireDefault(0.U(params.entries.W)) val used_clr = WireDefault(0.U(params.entries.W)) val push_tail = tail.read(io.push.bits.index) val push_valid = valid(io.push.bits.index) io.push.ready := !used.andR when (io.push.fire) { valid_set := UIntToOH(io.push.bits.index, params.queues) used_set := freeOH data.write(freeIdx, io.push.bits.data) when (push_valid) { next.write(push_tail, freeIdx) } .otherwise { head.write(io.push.bits.index, freeIdx) } tail.write(io.push.bits.index, freeIdx) } val pop_head = head.read(io.pop.bits) val pop_valid = valid(io.pop.bits) // Bypass push data to the peek port io.data := (if (!params.bypass) data.read(pop_head) else Mux(!pop_valid, io.push.bits.data, data.read(pop_head))) io.valid := (if (!params.bypass) valid else (valid | valid_set)) // It is an error to pop something that is not valid assert (!io.pop.fire || (io.valid)(io.pop.bits)) when (io.pop.fire) { used_clr := UIntToOH(pop_head, params.entries) when (pop_head === tail.read(io.pop.bits)) { valid_clr := UIntToOH(io.pop.bits, params.queues) } head.write(io.pop.bits, Mux(io.push.fire && push_valid && push_tail === pop_head, freeIdx, next.read(pop_head))) } // Empty bypass changes no state when ((!params.bypass).B || !io.pop.valid || pop_valid) { used := (used & ~used_clr) | used_set valid := (valid & ~valid_clr) | valid_set } }
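In the generated module below, the widest piece of combinational logic is the free-slot search from the source above: freeOH = ~(leftOR(~used) << 1) & ~used isolates the lowest clear bit of used (the lowest unallocated entry), and freeIdx is its binary encoding. The plain-Scala model here reproduces that bit math on a concrete value as a sanity check; it is an illustration, not part of the generated design.

def leftOR(x: Int, w: Int): Int = {               // fill 1s upward from every set bit
  var a = x & ((1 << w) - 1)
  var s = 1
  while (s < w) { a |= (a << s) & ((1 << w) - 1); s <<= 1 }
  a
}
val w       = 8                                    // params.entries
val used    = Integer.parseInt("00010111", 2)      // entries 0, 1, 2 and 4 occupied
val free    = ~used & ((1 << w) - 1)               // 0b11101000
val freeOH  = ~(leftOR(free, w) << 1) & free & ((1 << w) - 1)  // 0b00001000 -> entry 3 is the lowest free slot
val freeIdx = Integer.numberOfTrailingZeros(freeOH)            // 3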
module ListBuffer_PutBufferCEntry_q2_e8_6( // @[ListBuffer.scala:36:7] input clock, // @[ListBuffer.scala:36:7] input reset, // @[ListBuffer.scala:36:7] output io_push_ready, // @[ListBuffer.scala:39:14] input io_push_valid, // @[ListBuffer.scala:39:14] input io_push_bits_index, // @[ListBuffer.scala:39:14] input [127:0] io_push_bits_data_data, // @[ListBuffer.scala:39:14] input io_push_bits_data_corrupt, // @[ListBuffer.scala:39:14] output [1:0] io_valid, // @[ListBuffer.scala:39:14] input io_pop_valid, // @[ListBuffer.scala:39:14] input io_pop_bits, // @[ListBuffer.scala:39:14] output [127:0] io_data_data, // @[ListBuffer.scala:39:14] output io_data_corrupt // @[ListBuffer.scala:39:14] ); wire [128:0] _data_ext_R0_data; // @[ListBuffer.scala:52:18] wire [2:0] _next_ext_R0_data; // @[ListBuffer.scala:51:18] wire [2:0] _tail_ext_R0_data; // @[ListBuffer.scala:49:18] wire [2:0] _tail_ext_R1_data; // @[ListBuffer.scala:49:18] wire [2:0] _head_ext_R0_data; // @[ListBuffer.scala:48:18] wire io_push_valid_0 = io_push_valid; // @[ListBuffer.scala:36:7] wire io_push_bits_index_0 = io_push_bits_index; // @[ListBuffer.scala:36:7] wire [127:0] io_push_bits_data_data_0 = io_push_bits_data_data; // @[ListBuffer.scala:36:7] wire io_push_bits_data_corrupt_0 = io_push_bits_data_corrupt; // @[ListBuffer.scala:36:7] wire io_pop_valid_0 = io_pop_valid; // @[ListBuffer.scala:36:7] wire io_pop_bits_0 = io_pop_bits; // @[ListBuffer.scala:36:7] wire _io_push_ready_T_1; // @[ListBuffer.scala:65:20] wire valid_set_shiftAmount = io_push_bits_index_0; // @[OneHot.scala:64:49] wire valid_clr_shiftAmount = io_pop_bits_0; // @[OneHot.scala:64:49] wire io_push_ready_0; // @[ListBuffer.scala:36:7] wire [127:0] io_data_data_0; // @[ListBuffer.scala:36:7] wire io_data_corrupt_0; // @[ListBuffer.scala:36:7] wire [1:0] io_valid_0; // @[ListBuffer.scala:36:7] reg [1:0] valid; // @[ListBuffer.scala:47:22] assign io_valid_0 = valid; // @[ListBuffer.scala:36:7, :47:22] reg [7:0] used; // @[ListBuffer.scala:50:22] assign io_data_data_0 = _data_ext_R0_data[127:0]; // @[ListBuffer.scala:36:7, :52:18] assign io_data_corrupt_0 = _data_ext_R0_data[128]; // @[ListBuffer.scala:36:7, :52:18] wire [7:0] _freeOH_T = ~used; // @[ListBuffer.scala:50:22, :54:25] wire [8:0] _freeOH_T_1 = {_freeOH_T, 1'h0}; // @[package.scala:253:48] wire [7:0] _freeOH_T_2 = _freeOH_T_1[7:0]; // @[package.scala:253:{48,53}] wire [7:0] _freeOH_T_3 = _freeOH_T | _freeOH_T_2; // @[package.scala:253:{43,53}] wire [9:0] _freeOH_T_4 = {_freeOH_T_3, 2'h0}; // @[package.scala:253:{43,48}] wire [7:0] _freeOH_T_5 = _freeOH_T_4[7:0]; // @[package.scala:253:{48,53}] wire [7:0] _freeOH_T_6 = _freeOH_T_3 | _freeOH_T_5; // @[package.scala:253:{43,53}] wire [11:0] _freeOH_T_7 = {_freeOH_T_6, 4'h0}; // @[package.scala:253:{43,48}] wire [7:0] _freeOH_T_8 = _freeOH_T_7[7:0]; // @[package.scala:253:{48,53}] wire [7:0] _freeOH_T_9 = _freeOH_T_6 | _freeOH_T_8; // @[package.scala:253:{43,53}] wire [7:0] _freeOH_T_10 = _freeOH_T_9; // @[package.scala:253:43, :254:17] wire [8:0] _freeOH_T_11 = {_freeOH_T_10, 1'h0}; // @[package.scala:254:17] wire [8:0] _freeOH_T_12 = ~_freeOH_T_11; // @[ListBuffer.scala:54:{16,32}] wire [7:0] _freeOH_T_13 = ~used; // @[ListBuffer.scala:50:22, :54:{25,40}] wire [8:0] freeOH = {1'h0, _freeOH_T_12[7:0] & _freeOH_T_13}; // @[ListBuffer.scala:54:{16,38,40}] wire freeIdx_hi = freeOH[8]; // @[OneHot.scala:30:18] wire _freeIdx_T = freeIdx_hi; // @[OneHot.scala:30:18, :32:14] wire [7:0] freeIdx_lo = freeOH[7:0]; // @[OneHot.scala:31:18] wire [7:0] _freeIdx_T_1 
= {7'h0, freeIdx_hi} | freeIdx_lo; // @[OneHot.scala:30:18, :31:18, :32:28] wire [3:0] freeIdx_hi_1 = _freeIdx_T_1[7:4]; // @[OneHot.scala:30:18, :32:28] wire [3:0] freeIdx_lo_1 = _freeIdx_T_1[3:0]; // @[OneHot.scala:31:18, :32:28] wire _freeIdx_T_2 = |freeIdx_hi_1; // @[OneHot.scala:30:18, :32:14] wire [3:0] _freeIdx_T_3 = freeIdx_hi_1 | freeIdx_lo_1; // @[OneHot.scala:30:18, :31:18, :32:28] wire [1:0] freeIdx_hi_2 = _freeIdx_T_3[3:2]; // @[OneHot.scala:30:18, :32:28] wire [1:0] freeIdx_lo_2 = _freeIdx_T_3[1:0]; // @[OneHot.scala:31:18, :32:28] wire _freeIdx_T_4 = |freeIdx_hi_2; // @[OneHot.scala:30:18, :32:14] wire [1:0] _freeIdx_T_5 = freeIdx_hi_2 | freeIdx_lo_2; // @[OneHot.scala:30:18, :31:18, :32:28] wire _freeIdx_T_6 = _freeIdx_T_5[1]; // @[OneHot.scala:32:28] wire [1:0] _freeIdx_T_7 = {_freeIdx_T_4, _freeIdx_T_6}; // @[OneHot.scala:32:{10,14}] wire [2:0] _freeIdx_T_8 = {_freeIdx_T_2, _freeIdx_T_7}; // @[OneHot.scala:32:{10,14}] wire [3:0] freeIdx = {_freeIdx_T, _freeIdx_T_8}; // @[OneHot.scala:32:{10,14}] wire [1:0] valid_set; // @[ListBuffer.scala:57:30] wire [1:0] valid_clr; // @[ListBuffer.scala:58:30] wire [7:0] used_set; // @[ListBuffer.scala:59:30] wire [7:0] used_clr; // @[ListBuffer.scala:60:30] wire [1:0] _push_valid_T = valid >> io_push_bits_index_0; // @[ListBuffer.scala:36:7, :47:22, :63:25] wire push_valid = _push_valid_T[0]; // @[ListBuffer.scala:63:25] wire _io_push_ready_T = &used; // @[ListBuffer.scala:50:22, :65:26] assign _io_push_ready_T_1 = ~_io_push_ready_T; // @[ListBuffer.scala:65:{20,26}] assign io_push_ready_0 = _io_push_ready_T_1; // @[ListBuffer.scala:36:7, :65:20] wire data_MPORT_en = io_push_ready_0 & io_push_valid_0; // @[Decoupled.scala:51:35] wire [1:0] _valid_set_T = 2'h1 << valid_set_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [1:0] _valid_set_T_1 = _valid_set_T; // @[OneHot.scala:65:{12,27}] assign valid_set = data_MPORT_en ? _valid_set_T_1 : 2'h0; // @[OneHot.scala:65:27] assign used_set = data_MPORT_en ? freeIdx_lo : 8'h0; // @[OneHot.scala:31:18] wire [1:0] _GEN = {1'h0, io_pop_bits_0}; // @[ListBuffer.scala:36:7, :79:24] wire [1:0] _pop_valid_T = valid >> _GEN; // @[ListBuffer.scala:47:22, :79:24] wire pop_valid = _pop_valid_T[0]; // @[ListBuffer.scala:79:24]
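The freeIdx_hi/freeIdx_lo cascade above is the unrolled form of OHToUInt: at each step the one-hot vector is split in half, the next result bit is set when the high half is nonzero, and the two halves are OR-ed together before recursing. A small recursive model (plain Scala, power-of-two width assumed, ignoring the extra zero MSB the generator pads onto freeOH) makes the wiring easier to follow:

def ohToUInt(oh: Int, w: Int): Int =
  if (w <= 1) 0
  else {
    val half = w / 2
    val hi   = oh >> half
    val lo   = oh & ((1 << half) - 1)
    val msb  = if (hi != 0) half else 0   // contribution of this level's result bit
    msb + ohToUInt(hi | lo, half)
  }
// ohToUInt(0x08, 8) == 3, matching freeIdx when freeOH selects entry 3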
Generate the Verilog code corresponding to the following Chisel files. File Buffer.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.diplomacy.BufferParams class TLBufferNode ( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams)(implicit valName: ValName) extends TLAdapterNode( clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) }, managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) } ) { override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}" override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none) } class TLBuffer( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams)(implicit p: Parameters) extends LazyModule { def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace) def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde) def this()(implicit p: Parameters) = this(BufferParams.default) val node = new TLBufferNode(a, b, c, d, e) lazy val module = new Impl class Impl extends LazyModuleImp(this) { def headBundle = node.out.head._2.bundle override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_") (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out.a <> a(in .a) in .d <> d(out.d) if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) { in .b <> b(out.b) out.c <> c(in .c) out.e <> e(in .e) } else { in.b.valid := false.B in.c.ready := true.B in.e.ready := true.B out.b.ready := true.B out.c.valid := false.B out.e.valid := false.B } } } } object TLBuffer { def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default) def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde) def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace) def apply( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams)(implicit p: Parameters): TLNode = { val buffer = LazyModule(new TLBuffer(a, b, c, d, e)) buffer.node } def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = { val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) } name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } } buffers.map(_.node) } def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = { chain(depth, name) .reduceLeftOption(_ :*=* _) .getOrElse(TLNameNode("no_buffer")) } } File Fragmenter.scala: // See LICENSE.SiFive for license details. 
package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.diplomacy.{AddressSet, BufferParams, IdRange, TransferSizes} import freechips.rocketchip.util.{Repeater, OH1ToUInt, UIntToOH1} import scala.math.min import freechips.rocketchip.util.DataToAugmentedData object EarlyAck { sealed trait T case object AllPuts extends T case object PutFulls extends T case object None extends T } // minSize: minimum size of transfers supported by all outward managers // maxSize: maximum size of transfers supported after the Fragmenter is applied // alwaysMin: fragment all requests down to minSize (else fragment to maximum supported by manager) // earlyAck: should a multibeat Put should be acknowledged on the first beat or last beat // holdFirstDeny: allow the Fragmenter to unsafely combine multibeat Gets by taking the first denied for the whole burst // nameSuffix: appends a suffix to the module name // Fragmenter modifies: PutFull, PutPartial, LogicalData, Get, Hint // Fragmenter passes: ArithmeticData (truncated to minSize if alwaysMin) // Fragmenter cannot modify acquire (could livelock); thus it is unsafe to put caches on both sides class TLFragmenter(val minSize: Int, val maxSize: Int, val alwaysMin: Boolean = false, val earlyAck: EarlyAck.T = EarlyAck.None, val holdFirstDeny: Boolean = false, val nameSuffix: Option[String] = None)(implicit p: Parameters) extends LazyModule { require(isPow2 (maxSize), s"TLFragmenter expects pow2(maxSize), but got $maxSize") require(isPow2 (minSize), s"TLFragmenter expects pow2(minSize), but got $minSize") require(minSize <= maxSize, s"TLFragmenter expects min <= max, but got $minSize > $maxSize") val fragmentBits = log2Ceil(maxSize / minSize) val fullBits = if (earlyAck == EarlyAck.PutFulls) 1 else 0 val toggleBits = 1 val addedBits = fragmentBits + toggleBits + fullBits def expandTransfer(x: TransferSizes, op: String) = if (!x) x else { // validate that we can apply the fragmenter correctly require (x.max >= minSize, s"TLFragmenter (with parent $parent) max transfer size $op(${x.max}) must be >= min transfer size (${minSize})") TransferSizes(x.min, maxSize) } private def noChangeRequired = minSize == maxSize private def shrinkTransfer(x: TransferSizes) = if (!alwaysMin) x else if (x.min <= minSize) TransferSizes(x.min, min(minSize, x.max)) else TransferSizes.none private def mapManager(m: TLSlaveParameters) = m.v1copy( supportsArithmetic = shrinkTransfer(m.supportsArithmetic), supportsLogical = shrinkTransfer(m.supportsLogical), supportsGet = expandTransfer(m.supportsGet, "Get"), supportsPutFull = expandTransfer(m.supportsPutFull, "PutFull"), supportsPutPartial = expandTransfer(m.supportsPutPartial, "PutParital"), supportsHint = expandTransfer(m.supportsHint, "Hint")) val node = new TLAdapterNode( // We require that all the responses are mutually FIFO // Thus we need to compact all of the masters into one big master clientFn = { c => (if (noChangeRequired) c else c.v2copy( masters = Seq(TLMasterParameters.v2( name = "TLFragmenter", sourceId = IdRange(0, if (minSize == maxSize) c.endSourceId else (c.endSourceId << addedBits)), requestFifo = true, emits = TLMasterToSlaveTransferSizes( acquireT = shrinkTransfer(c.masters.map(_.emits.acquireT) .reduce(_ mincover _)), acquireB = shrinkTransfer(c.masters.map(_.emits.acquireB) .reduce(_ mincover _)), arithmetic = 
shrinkTransfer(c.masters.map(_.emits.arithmetic).reduce(_ mincover _)), logical = shrinkTransfer(c.masters.map(_.emits.logical) .reduce(_ mincover _)), get = shrinkTransfer(c.masters.map(_.emits.get) .reduce(_ mincover _)), putFull = shrinkTransfer(c.masters.map(_.emits.putFull) .reduce(_ mincover _)), putPartial = shrinkTransfer(c.masters.map(_.emits.putPartial).reduce(_ mincover _)), hint = shrinkTransfer(c.masters.map(_.emits.hint) .reduce(_ mincover _)) ) )) ))}, managerFn = { m => if (noChangeRequired) m else m.v2copy(slaves = m.slaves.map(mapManager)) } ) { override def circuitIdentity = noChangeRequired } lazy val module = new Impl class Impl extends LazyModuleImp(this) { override def desiredName = (Seq("TLFragmenter") ++ nameSuffix).mkString("_") (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => if (noChangeRequired) { out <> in } else { // All managers must share a common FIFO domain (responses might end up interleaved) val manager = edgeOut.manager val managers = manager.managers val beatBytes = manager.beatBytes val fifoId = managers(0).fifoId require (fifoId.isDefined && managers.map(_.fifoId == fifoId).reduce(_ && _)) require (!manager.anySupportAcquireB || !edgeOut.client.anySupportProbe, s"TLFragmenter (with parent $parent) can't fragment a caching client's requests into a cacheable region") require (minSize >= beatBytes, s"TLFragmenter (with parent $parent) can't support fragmenting ($minSize) to sub-beat ($beatBytes) accesses") // We can't support devices which are cached on both sides of us require (!edgeOut.manager.anySupportAcquireB || !edgeIn.client.anySupportProbe) // We can't support denied because we reassemble fragments require (!edgeOut.manager.mayDenyGet || holdFirstDeny, s"TLFragmenter (with parent $parent) can't support denials without holdFirstDeny=true") require (!edgeOut.manager.mayDenyPut || earlyAck == EarlyAck.None) /* The Fragmenter is a bit tricky, because there are 5 sizes in play: * max size -- the maximum transfer size possible * orig size -- the original pre-fragmenter size * frag size -- the modified post-fragmenter size * min size -- the threshold below which frag=orig * beat size -- the amount transfered on any given beat * * The relationships are as follows: * max >= orig >= frag * max > min >= beat * It IS possible that orig <= min (then frag=orig; ie: no fragmentation) * * The fragment# (sent via TL.source) is measured in multiples of min size. * Meanwhile, to track the progress, counters measure in multiples of beat size. * * Here is an example of a bus with max=256, min=8, beat=4 and a device supporting 16. 
* * in.A out.A (frag#) out.D (frag#) in.D gen# ack# * get64 get16 6 ackD16 6 ackD64 12 15 * ackD16 6 ackD64 14 * ackD16 6 ackD64 13 * ackD16 6 ackD64 12 * get16 4 ackD16 4 ackD64 8 11 * ackD16 4 ackD64 10 * ackD16 4 ackD64 9 * ackD16 4 ackD64 8 * get16 2 ackD16 2 ackD64 4 7 * ackD16 2 ackD64 6 * ackD16 2 ackD64 5 * ackD16 2 ackD64 4 * get16 0 ackD16 0 ackD64 0 3 * ackD16 0 ackD64 2 * ackD16 0 ackD64 1 * ackD16 0 ackD64 0 * * get8 get8 0 ackD8 0 ackD8 0 1 * ackD8 0 ackD8 0 * * get4 get4 0 ackD4 0 ackD4 0 0 * get1 get1 0 ackD1 0 ackD1 0 0 * * put64 put16 6 15 * put64 put16 6 14 * put64 put16 6 13 * put64 put16 6 ack16 6 12 12 * put64 put16 4 11 * put64 put16 4 10 * put64 put16 4 9 * put64 put16 4 ack16 4 8 8 * put64 put16 2 7 * put64 put16 2 6 * put64 put16 2 5 * put64 put16 2 ack16 2 4 4 * put64 put16 0 3 * put64 put16 0 2 * put64 put16 0 1 * put64 put16 0 ack16 0 ack64 0 0 * * put8 put8 0 1 * put8 put8 0 ack8 0 ack8 0 0 * * put4 put4 0 ack4 0 ack4 0 0 * put1 put1 0 ack1 0 ack1 0 0 */ val counterBits = log2Up(maxSize/beatBytes) val maxDownSize = if (alwaysMin) minSize else min(manager.maxTransfer, maxSize) // Consider the following waveform for two 4-beat bursts: // ---A----A------------ // -------D-----DDD-DDDD // Under TL rules, the second A can use the same source as the first A, // because the source is released for reuse on the first response beat. // // However, if we fragment the requests, it looks like this: // ---3210-3210--------- // -------3-----210-3210 // ... now we've broken the rules because 210 are twice inflight. // // This phenomenon means we can have essentially 2*maxSize/minSize-1 // fragmented transactions in flight per original transaction source. // // To keep the source unique, we encode the beat counter in the low // bits of the source. To solve the overlap, we use a toggle bit. // Whatever toggle bit the D is reassembling, A will use the opposite. 
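      // Worked example of the source encoding (illustrative numbers, not part of the upstream file):
      // with maxSize=256 and minSize=8, fragmentBits = log2Ceil(256/8) = 5; with toggleBits = 1 and,
      // for earlyAck = PutFulls, fullBits = 1, so addedBits = 7. Channel A then carries
      // Cat(origSource, putFull, toggle, fragnum(4,0)), and channel D recovers the original source as
      // out.d.bits.source >> addedBits. fragnum counts in multiples of minSize and descends across the
      // burst (the 6/4/2/0 sequence in the table above), so fragnum 0 is the final fragment.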
// First, handle the return path val acknum = RegInit(0.U(counterBits.W)) val dOrig = Reg(UInt()) val dToggle = RegInit(false.B) val dFragnum = out.d.bits.source(fragmentBits-1, 0) val dFirst = acknum === 0.U val dLast = dFragnum === 0.U // only for AccessAck (!Data) val dsizeOH = UIntToOH (out.d.bits.size, log2Ceil(maxDownSize)+1) val dsizeOH1 = UIntToOH1(out.d.bits.size, log2Up(maxDownSize)) val dHasData = edgeOut.hasData(out.d.bits) // calculate new acknum val acknum_fragment = dFragnum << log2Ceil(minSize/beatBytes) val acknum_size = dsizeOH1 >> log2Ceil(beatBytes) assert (!out.d.valid || (acknum_fragment & acknum_size) === 0.U) val dFirst_acknum = acknum_fragment | Mux(dHasData, acknum_size, 0.U) val ack_decrement = Mux(dHasData, 1.U, dsizeOH >> log2Ceil(beatBytes)) // calculate the original size val dFirst_size = OH1ToUInt((dFragnum << log2Ceil(minSize)) | dsizeOH1) when (out.d.fire) { acknum := Mux(dFirst, dFirst_acknum, acknum - ack_decrement) when (dFirst) { dOrig := dFirst_size dToggle := out.d.bits.source(fragmentBits) } } // Swallow up non-data ack fragments val doEarlyAck = earlyAck match { case EarlyAck.AllPuts => true.B case EarlyAck.PutFulls => out.d.bits.source(fragmentBits+1) case EarlyAck.None => false.B } val drop = !dHasData && !Mux(doEarlyAck, dFirst, dLast) out.d.ready := in.d.ready || drop in.d.valid := out.d.valid && !drop in.d.bits := out.d.bits // pass most stuff unchanged in.d.bits.source := out.d.bits.source >> addedBits in.d.bits.size := Mux(dFirst, dFirst_size, dOrig) if (edgeOut.manager.mayDenyPut) { val r_denied = Reg(Bool()) val d_denied = (!dFirst && r_denied) || out.d.bits.denied when (out.d.fire) { r_denied := d_denied } in.d.bits.denied := d_denied } if (edgeOut.manager.mayDenyGet) { // Take denied only from the first beat and hold that value val d_denied = out.d.bits.denied holdUnless dFirst when (dHasData) { in.d.bits.denied := d_denied in.d.bits.corrupt := d_denied || out.d.bits.corrupt } } // What maximum transfer sizes do downstream devices support? 
val maxArithmetics = managers.map(_.supportsArithmetic.max) val maxLogicals = managers.map(_.supportsLogical.max) val maxGets = managers.map(_.supportsGet.max) val maxPutFulls = managers.map(_.supportsPutFull.max) val maxPutPartials = managers.map(_.supportsPutPartial.max) val maxHints = managers.map(m => if (m.supportsHint) maxDownSize else 0) // We assume that the request is valid => size 0 is impossible val lgMinSize = log2Ceil(minSize).U val maxLgArithmetics = maxArithmetics.map(m => if (m == 0) lgMinSize else log2Ceil(m).U) val maxLgLogicals = maxLogicals .map(m => if (m == 0) lgMinSize else log2Ceil(m).U) val maxLgGets = maxGets .map(m => if (m == 0) lgMinSize else log2Ceil(m).U) val maxLgPutFulls = maxPutFulls .map(m => if (m == 0) lgMinSize else log2Ceil(m).U) val maxLgPutPartials = maxPutPartials.map(m => if (m == 0) lgMinSize else log2Ceil(m).U) val maxLgHints = maxHints .map(m => if (m == 0) lgMinSize else log2Ceil(m).U) // Make the request repeatable val repeater = Module(new Repeater(in.a.bits)) repeater.io.enq <> in.a val in_a = repeater.io.deq // If this is infront of a single manager, these become constants val find = manager.findFast(edgeIn.address(in_a.bits)) val maxLgArithmetic = Mux1H(find, maxLgArithmetics) val maxLgLogical = Mux1H(find, maxLgLogicals) val maxLgGet = Mux1H(find, maxLgGets) val maxLgPutFull = Mux1H(find, maxLgPutFulls) val maxLgPutPartial = Mux1H(find, maxLgPutPartials) val maxLgHint = Mux1H(find, maxLgHints) val limit = if (alwaysMin) lgMinSize else MuxLookup(in_a.bits.opcode, lgMinSize)(Array( TLMessages.PutFullData -> maxLgPutFull, TLMessages.PutPartialData -> maxLgPutPartial, TLMessages.ArithmeticData -> maxLgArithmetic, TLMessages.LogicalData -> maxLgLogical, TLMessages.Get -> maxLgGet, TLMessages.Hint -> maxLgHint)) val aOrig = in_a.bits.size val aFrag = Mux(aOrig > limit, limit, aOrig) val aOrigOH1 = UIntToOH1(aOrig, log2Ceil(maxSize)) val aFragOH1 = UIntToOH1(aFrag, log2Up(maxDownSize)) val aHasData = edgeIn.hasData(in_a.bits) val aMask = Mux(aHasData, 0.U, aFragOH1) val gennum = RegInit(0.U(counterBits.W)) val aFirst = gennum === 0.U val old_gennum1 = Mux(aFirst, aOrigOH1 >> log2Ceil(beatBytes), gennum - 1.U) val new_gennum = ~(~old_gennum1 | (aMask >> log2Ceil(beatBytes))) // ~(~x|y) is width safe val aFragnum = ~(~(old_gennum1 >> log2Ceil(minSize/beatBytes)) | (aFragOH1 >> log2Ceil(minSize))) val aLast = aFragnum === 0.U val aToggle = !Mux(aFirst, dToggle, RegEnable(dToggle, aFirst)) val aFull = if (earlyAck == EarlyAck.PutFulls) Some(in_a.bits.opcode === TLMessages.PutFullData) else None when (out.a.fire) { gennum := new_gennum } repeater.io.repeat := !aHasData && aFragnum =/= 0.U out.a <> in_a out.a.bits.address := in_a.bits.address | ~(old_gennum1 << log2Ceil(beatBytes) | ~aOrigOH1 | aFragOH1 | (minSize-1).U) out.a.bits.source := Cat(Seq(in_a.bits.source) ++ aFull ++ Seq(aToggle.asUInt, aFragnum)) out.a.bits.size := aFrag // Optimize away some of the Repeater's registers assert (!repeater.io.full || !aHasData) out.a.bits.data := in.a.bits.data val fullMask = ((BigInt(1) << beatBytes) - 1).U assert (!repeater.io.full || in_a.bits.mask === fullMask) out.a.bits.mask := Mux(repeater.io.full, fullMask, in.a.bits.mask) out.a.bits.user.waiveAll :<= in.a.bits.user.subset(_.isData) // Tie off unused channels in.b.valid := false.B in.c.ready := true.B in.e.ready := true.B out.b.ready := true.B out.c.valid := false.B out.e.valid := false.B } } } } object TLFragmenter { def apply(minSize: Int, maxSize: Int, alwaysMin: Boolean = false, earlyAck: 
EarlyAck.T = EarlyAck.None, holdFirstDeny: Boolean = false, nameSuffix: Option[String] = None)(implicit p: Parameters): TLNode = { if (minSize <= maxSize) { val fragmenter = LazyModule(new TLFragmenter(minSize, maxSize, alwaysMin, earlyAck, holdFirstDeny, nameSuffix)) fragmenter.node } else { TLEphemeralNode()(ValName("no_fragmenter")) } } def apply(wrapper: TLBusWrapper, nameSuffix: Option[String])(implicit p: Parameters): TLNode = apply(wrapper.beatBytes, wrapper.blockBytes, nameSuffix = nameSuffix) def apply(wrapper: TLBusWrapper)(implicit p: Parameters): TLNode = apply(wrapper, None) } // Synthesizable unit tests import freechips.rocketchip.unittest._ class TLRAMFragmenter(ramBeatBytes: Int, maxSize: Int, txns: Int)(implicit p: Parameters) extends LazyModule { val fuzz = LazyModule(new TLFuzzer(txns)) val model = LazyModule(new TLRAMModel("Fragmenter")) val ram = LazyModule(new TLRAM(AddressSet(0x0, 0x3ff), beatBytes = ramBeatBytes)) (ram.node := TLDelayer(0.1) := TLBuffer(BufferParams.flow) := TLDelayer(0.1) := TLFragmenter(ramBeatBytes, maxSize, earlyAck = EarlyAck.AllPuts) := TLDelayer(0.1) := TLBuffer(BufferParams.flow) := TLFragmenter(ramBeatBytes, maxSize/2) := TLDelayer(0.1) := TLBuffer(BufferParams.flow) := model.node := fuzz.node) lazy val module = new Impl class Impl extends LazyModuleImp(this) with UnitTestModule { io.finished := fuzz.module.io.finished } } class TLRAMFragmenterTest(ramBeatBytes: Int, maxSize: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) { val dut = Module(LazyModule(new TLRAMFragmenter(ramBeatBytes,maxSize,txns)).module) io.finished := dut.io.finished dut.io.start := io.start } File BaseTile.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tile import chisel3._ import chisel3.util.{log2Ceil, log2Up} import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.bundlebridge._ import freechips.rocketchip.resources.{PropertyMap, PropertyOption, ResourceReference, DTSTimebase} import freechips.rocketchip.interrupts.{IntInwardNode, IntOutwardNode} import freechips.rocketchip.rocket.{ICacheParams, DCacheParams, BTBParams, ASIdBits, VMIdBits, TraceAux, BPWatch} import freechips.rocketchip.subsystem.{ HierarchicalElementParams, InstantiableHierarchicalElementParams, HierarchicalElementCrossingParamsLike, CacheBlockBytes, SystemBusKey, BaseHierarchicalElement, InsertTimingClosureRegistersOnHartIds, BaseHierarchicalElementModuleImp } import freechips.rocketchip.tilelink.{TLEphemeralNode, TLOutwardNode, TLNode, TLFragmenter, EarlyAck, TLWidthWidget, TLManagerParameters, ManagerUnification} import freechips.rocketchip.prci.{ClockCrossingType, ClockSinkParameters} import freechips.rocketchip.util.{TraceCoreParams, TraceCoreInterface} import freechips.rocketchip.resources.{BigIntToProperty, IntToProperty, StringToProperty} import freechips.rocketchip.util.BooleanToAugmentedBoolean case object TileVisibilityNodeKey extends Field[TLEphemeralNode] case object TileKey extends Field[TileParams] case object LookupByHartId extends Field[LookupByHartIdImpl] trait TileParams extends HierarchicalElementParams { val core: CoreParams val icache: Option[ICacheParams] val dcache: Option[DCacheParams] val btb: Option[BTBParams] val tileId: Int // may not be hartid val blockerCtrlAddr: Option[BigInt] } abstract class InstantiableTileParams[TileType <: BaseTile] extends InstantiableHierarchicalElementParams[TileType] with TileParams { def instantiate(crossing: 
HierarchicalElementCrossingParamsLike, lookup: LookupByHartIdImpl) (implicit p: Parameters): TileType } /** These parameters values are not computed based on diplomacy negotiation * and so are safe to use while diplomacy itself is running. */ trait HasNonDiplomaticTileParameters { implicit val p: Parameters def tileParams: TileParams = p(TileKey) def usingVM: Boolean = tileParams.core.useVM def usingUser: Boolean = tileParams.core.useUser || usingSupervisor def usingSupervisor: Boolean = tileParams.core.hasSupervisorMode def usingHypervisor: Boolean = usingVM && tileParams.core.useHypervisor def usingDebug: Boolean = tileParams.core.useDebug def usingRoCC: Boolean = !p(BuildRoCC).isEmpty def usingBTB: Boolean = tileParams.btb.isDefined && tileParams.btb.get.nEntries > 0 def usingPTW: Boolean = usingVM def usingDataScratchpad: Boolean = tileParams.dcache.flatMap(_.scratch).isDefined def xLen: Int = tileParams.core.xLen def xBytes: Int = xLen / 8 def iLen: Int = 32 def pgIdxBits: Int = 12 def pgLevelBits: Int = 10 - log2Ceil(xLen / 32) def pgLevels: Int = tileParams.core.pgLevels def maxSVAddrBits: Int = pgIdxBits + pgLevels * pgLevelBits def maxHypervisorExtraAddrBits: Int = 2 def hypervisorExtraAddrBits: Int = { if (usingHypervisor) maxHypervisorExtraAddrBits else 0 } def maxHVAddrBits: Int = maxSVAddrBits + hypervisorExtraAddrBits def minPgLevels: Int = { val res = xLen match { case 32 => 2; case 64 => 3 } require(pgLevels >= res) res } def asIdBits: Int = p(ASIdBits) def vmIdBits: Int = p(VMIdBits) lazy val maxPAddrBits: Int = { require(xLen == 32 || xLen == 64, s"Only XLENs of 32 or 64 are supported, but got $xLen") xLen match { case 32 => 34; case 64 => 56 } } def tileId: Int = tileParams.tileId def cacheBlockBytes = p(CacheBlockBytes) def lgCacheBlockBytes = log2Up(cacheBlockBytes) def masterPortBeatBytes = p(SystemBusKey).beatBytes // TODO make HellaCacheIO diplomatic and remove this brittle collection of hacks // Core PTW DTIM coprocessors def dcacheArbPorts = 1 + usingVM.toInt + usingDataScratchpad.toInt + p(BuildRoCC).size + (tileParams.core.useVector && tileParams.core.vectorUseDCache).toInt // TODO merge with isaString in CSR.scala def isaDTS: String = { val ie = if (tileParams.core.useRVE) "e" else "i" val m = if (tileParams.core.mulDiv.nonEmpty) "m" else "" val a = if (tileParams.core.useAtomics) "a" else "" val f = if (tileParams.core.fpu.nonEmpty) "f" else "" val d = if (tileParams.core.fpu.nonEmpty && tileParams.core.fpu.get.fLen > 32) "d" else "" val c = if (tileParams.core.useCompressed) "c" else "" val b = if (tileParams.core.useBitmanip) "b" else "" val v = if (tileParams.core.useVector && tileParams.core.vLen >= 128 && tileParams.core.eLen == 64 && tileParams.core.vfLen == 64) "v" else "" val h = if (usingHypervisor) "h" else "" val ext_strs = Seq( (tileParams.core.useVector) -> s"zvl${tileParams.core.vLen}b", (tileParams.core.useVector) -> { val c = tileParams.core.vfLen match { case 64 => "d" case 32 => "f" case 0 => "x" } s"zve${tileParams.core.eLen}$c" }, (tileParams.core.useVector && tileParams.core.vfh) -> "zvfh", (tileParams.core.fpu.map(_.fLen >= 16).getOrElse(false) && tileParams.core.minFLen <= 16) -> "zfh", (tileParams.core.useZba) -> "zba", (tileParams.core.useZbb) -> "zbb", (tileParams.core.useZbs) -> "zbs", (tileParams.core.useConditionalZero) -> "zicond" ).filter(_._1).map(_._2) val multiLetterExt = ( // rdcycle[h], rdinstret[h] is implemented // rdtime[h] is not implemented, and could be provided by software emulation // see 
https://github.com/chipsalliance/rocket-chip/issues/3207 //Some(Seq("zicntr")) ++ Some(Seq("zicsr", "zifencei", "zihpm")) ++ Some(ext_strs) ++ Some(tileParams.core.vExts) ++ tileParams.core.customIsaExt.map(Seq(_)) ).flatten val multiLetterString = multiLetterExt.mkString("_") s"rv$xLen$ie$m$a$f$d$c$b$v$h$multiLetterString" } def tileProperties: PropertyMap = { val dcache = tileParams.dcache.filter(!_.scratch.isDefined).map(d => Map( "d-cache-block-size" -> cacheBlockBytes.asProperty, "d-cache-sets" -> d.nSets.asProperty, "d-cache-size" -> (d.nSets * d.nWays * cacheBlockBytes).asProperty) ).getOrElse(Nil) val incoherent = if (!tileParams.core.useAtomicsOnlyForIO) Nil else Map( "sifive,d-cache-incoherent" -> Nil) val icache = tileParams.icache.map(i => Map( "i-cache-block-size" -> cacheBlockBytes.asProperty, "i-cache-sets" -> i.nSets.asProperty, "i-cache-size" -> (i.nSets * i.nWays * cacheBlockBytes).asProperty) ).getOrElse(Nil) val dtlb = tileParams.dcache.filter(_ => tileParams.core.useVM).map(d => Map( "d-tlb-size" -> (d.nTLBWays * d.nTLBSets).asProperty, "d-tlb-sets" -> d.nTLBSets.asProperty)).getOrElse(Nil) val itlb = tileParams.icache.filter(_ => tileParams.core.useVM).map(i => Map( "i-tlb-size" -> (i.nTLBWays * i.nTLBSets).asProperty, "i-tlb-sets" -> i.nTLBSets.asProperty)).getOrElse(Nil) val mmu = if (tileParams.core.useVM) { if (tileParams.core.useHypervisor) { Map("tlb-split" -> Nil, "mmu-type" -> s"riscv,sv${maxSVAddrBits},sv${maxSVAddrBits}x4".asProperty) } else { Map("tlb-split" -> Nil, "mmu-type" -> s"riscv,sv$maxSVAddrBits".asProperty) } } else { Nil } val pmp = if (tileParams.core.nPMPs > 0) Map( "riscv,pmpregions" -> tileParams.core.nPMPs.asProperty, "riscv,pmpgranularity" -> tileParams.core.pmpGranularity.asProperty) else Nil dcache ++ icache ++ dtlb ++ itlb ++ mmu ++ pmp ++ incoherent } } /** These parameters values are computed based on diplomacy negotiations * and so are NOT safe to use while diplomacy itself is running. * Only mix this trait into LazyModuleImps, Modules, Bundles, Data, etc. 
*/ trait HasTileParameters extends HasNonDiplomaticTileParameters { protected def tlBundleParams = p(TileVisibilityNodeKey).edges.out.head.bundle lazy val paddrBits: Int = { val bits = tlBundleParams.addressBits require(bits <= maxPAddrBits, s"Requested $bits paddr bits, but since xLen is $xLen only $maxPAddrBits will fit") bits } def vaddrBits: Int = if (usingVM) { val v = maxHVAddrBits require(v == xLen || xLen > v && v > paddrBits) v } else { // since virtual addresses sign-extend but physical addresses // zero-extend, make room for a zero sign bit for physical addresses (paddrBits + 1) min xLen } def vpnBits: Int = vaddrBits - pgIdxBits def ppnBits: Int = paddrBits - pgIdxBits def vpnBitsExtended: Int = vpnBits + (if (vaddrBits < xLen) 1 + usingHypervisor.toInt else 0) def vaddrBitsExtended: Int = vpnBitsExtended + pgIdxBits } /** Base class for all Tiles that use TileLink */ abstract class BaseTile private (crossing: ClockCrossingType, q: Parameters) extends BaseHierarchicalElement(crossing)(q) with HasNonDiplomaticTileParameters { // Public constructor alters Parameters to supply some legacy compatibility keys def this(tileParams: TileParams, crossing: ClockCrossingType, lookup: LookupByHartIdImpl, p: Parameters) = { this(crossing, p.alterMap(Map( TileKey -> tileParams, TileVisibilityNodeKey -> TLEphemeralNode()(ValName("tile_master")), LookupByHartId -> lookup ))) } def intInwardNode: IntInwardNode // Interrupts to the core from external devices def intOutwardNode: Option[IntOutwardNode] // Interrupts from tile-internal devices (e.g. BEU) def haltNode: IntOutwardNode // Unrecoverable error has occurred; suggest reset def ceaseNode: IntOutwardNode // Tile has ceased to retire instructions def wfiNode: IntOutwardNode // Tile is waiting for an interrupt def module: BaseTileModuleImp[BaseTile] /** Node for broadcasting a hart id to diplomatic consumers within the tile. */ val hartIdNexusNode: BundleBridgeNode[UInt] = BundleBroadcast[UInt](registered = p(InsertTimingClosureRegistersOnHartIds)) /** Node for consuming the hart id input in tile-layer Chisel logic. */ val hartIdSinkNode = BundleBridgeSink[UInt]() /** Node for driving a hart id input, which is to be broadcast to units within the tile. * * Making this id value an IO and then using it to do lookups of information * that would make otherwise-homogeneous tiles heterogeneous is a useful trick * to enable deduplication of tiles for hierarchical P&R flows. */ val hartIdNode: BundleBridgeInwardNode[UInt] = hartIdSinkNode := hartIdNexusNode := BundleBridgeNameNode("hartid") /** Node for broadcasting a reset vector to diplomatic consumers within the tile. */ val resetVectorNexusNode: BundleBridgeNode[UInt] = BundleBroadcast[UInt]() /** Node for consuming the reset vector input in tile-layer Chisel logic. * * Its width is sized by looking at the size of the address space visible * on the tile's master ports, but this lookup is not evaluated until * diplomacy has completed and Chisel elaboration has begun. */ val resetVectorSinkNode = BundleBridgeSink[UInt](Some(() => UInt(visiblePhysAddrBits.W))) /** Node for supplying a reset vector that processors in this tile might begin fetching instructions from as they come out of reset. 
*/ val resetVectorNode: BundleBridgeInwardNode[UInt] = resetVectorSinkNode := resetVectorNexusNode := BundleBridgeNameNode("reset_vector") /** Nodes for connecting NMI interrupt sources and vectors into the tile */ val nmiSinkNode = Option.when(tileParams.core.useNMI) { BundleBridgeSink[NMI](Some(() => new NMI(visiblePhysAddrBits))) } val nmiNode: Option[BundleBridgeInwardNode[NMI]] = nmiSinkNode.map(_ := BundleBridgeNameNode("nmi")) /** Node for broadcasting an address prefix to diplomatic consumers within the tile. * * The prefix should be applied by consumers by or-ing ouputs of this node * with a static base address (which is looked up based on the driven hartid value). */ val mmioAddressPrefixNexusNode = BundleBridgeNexus[UInt]( inputFn = BundleBridgeNexus.orReduction[UInt](registered = false) _, outputFn = BundleBridgeNexus.fillN[UInt](registered = false) _, default = Some(() => 0.U(1.W)) ) /** Node for external drivers to prefix base addresses of MMIO devices to which the core has a direct access path. */ val mmioAddressPrefixNode: BundleBridgeInwardNode[UInt] = mmioAddressPrefixNexusNode :=* BundleBridgeNameNode("mmio_address_prefix") // TODO: Any node marked "consumed by the core" or "driven by the core" // should be moved to either be: a member of a specific BaseTile subclass, // or actually just a member of the core's LazyModule itself, // assuming the core itself is diplomatic. // Then these nodes should just become IdentityNodes of their respective type protected def traceRetireWidth = tileParams.core.retireWidth /** Node for the core to drive legacy "raw" instruction trace. */ val traceSourceNode = BundleBridgeSource(() => new TraceBundle) /** Node for external consumers to source a legacy instruction trace from the core. */ val traceNode = traceSourceNode def traceCoreParams = new TraceCoreParams() /** Node for core to drive instruction trace conforming to RISC-V Processor Trace spec V1.0 */ val traceCoreSourceNode = BundleBridgeSource(() => new TraceCoreInterface(traceCoreParams)) /** Node for external consumers to source a V1.0 instruction trace from the core. */ val traceCoreNode = traceCoreSourceNode /** Node to broadcast collected trace sideband signals into the tile. */ val traceAuxNexusNode = BundleBridgeNexus[TraceAux](default = Some(() => { val aux = Wire(new TraceAux) aux.stall := false.B aux.enable := false.B aux })) /** Trace sideband signals to be consumed by the core. */ val traceAuxSinkNode = BundleBridgeSink[TraceAux]() /** Trace sideband signals collected here to be driven into the tile. */ val traceAuxNode: BundleBridgeInwardNode[TraceAux] = traceAuxSinkNode := traceAuxNexusNode :=* BundleBridgeNameNode("trace_aux") /** Node for watchpoints to control trace driven by core. */ val bpwatchSourceNode = BundleBridgeSource(() => Vec(tileParams.core.nBreakpoints, new BPWatch(traceRetireWidth))) /** Node to broadcast watchpoints to control trace. */ val bpwatchNexusNode = BundleBroadcast[Vec[BPWatch]]() /** Node for external consumers to source watchpoints to control trace. */ val bpwatchNode: BundleBridgeOutwardNode[Vec[BPWatch]] = BundleBridgeNameNode("bpwatch") :*= bpwatchNexusNode := bpwatchSourceNode /** Helper function for connecting MMIO devices inside the tile to an xbar that will make them visible to external masters. 
*/ def connectTLSlave(xbarNode: TLOutwardNode, node: TLNode, bytes: Int): Unit = { DisableMonitors { implicit p => (Seq(node, TLFragmenter(bytes, cacheBlockBytes, earlyAck=EarlyAck.PutFulls)) ++ (xBytes != bytes).option(TLWidthWidget(xBytes))) .foldRight(xbarNode)(_ :*= _) } } def connectTLSlave(node: TLNode, bytes: Int): Unit = { connectTLSlave(tlSlaveXbar.node, node, bytes) } /** TileLink node which represents the view that the intra-tile masters have of the rest of the system. */ val visibilityNode = p(TileVisibilityNodeKey) protected def visibleManagers = visibilityNode.edges.out.flatMap(_.manager.managers) protected def visiblePhysAddrBits = visibilityNode.edges.out.head.bundle.addressBits def unifyManagers: List[TLManagerParameters] = ManagerUnification(visibleManagers) /** Finds resource labels for all the outward caches. */ def nextLevelCacheProperty: PropertyOption = { val outer = visibleManagers .filter(_.supportsAcquireB) .flatMap(_.resources.headOption) .map(_.owner.label) .distinct if (outer.isEmpty) None else Some("next-level-cache" -> outer.map(l => ResourceReference(l)).toList) } /** Create a DTS representation of this "cpu". */ def cpuProperties: PropertyMap = Map( "device_type" -> "cpu".asProperty, "status" -> "okay".asProperty, "clock-frequency" -> tileParams.core.bootFreqHz.asProperty, "riscv,isa" -> isaDTS.asProperty, "timebase-frequency" -> p(DTSTimebase).asProperty, "hardware-exec-breakpoint-count" -> tileParams.core.nBreakpoints.asProperty ) /** Can be used to access derived params calculated by HasCoreParameters * * However, callers must ensure they do not access a diplomatically-determined parameter * before the graph in question has been fully connected. */ protected lazy val lazyCoreParamsView: HasCoreParameters = { class C(implicit val p: Parameters) extends HasCoreParameters new C } this.suggestName(tileParams.baseName) } abstract class BaseTileModuleImp[+L <: BaseTile](outer: L) extends BaseHierarchicalElementModuleImp[L](outer) File HellaCache.scala: // See LICENSE.SiFive for license details. // See LICENSE.Berkeley for license details. 
package freechips.rocketchip.rocket import chisel3.{dontTouch, _} import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.bundlebridge._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.amba.AMBAProtField import freechips.rocketchip.diplomacy.{IdRange, TransferSizes, RegionType} import freechips.rocketchip.tile.{L1CacheParams, HasL1CacheParameters, HasCoreParameters, CoreBundle, HasNonDiplomaticTileParameters, BaseTile, HasTileParameters} import freechips.rocketchip.tilelink.{TLMasterParameters, TLClientNode, TLMasterPortParameters, TLEdgeOut, TLWidthWidget, TLFIFOFixer, ClientMetadata} import freechips.rocketchip.util.{Code, RandomReplacement, ParameterizedBundle} import freechips.rocketchip.util.{BooleanToAugmentedBoolean, IntToAugmentedInt} import scala.collection.mutable.ListBuffer case class DCacheParams( nSets: Int = 64, nWays: Int = 4, rowBits: Int = 64, subWordBits: Option[Int] = None, replacementPolicy: String = "random", nTLBSets: Int = 1, nTLBWays: Int = 32, nTLBBasePageSectors: Int = 4, nTLBSuperpages: Int = 4, tagECC: Option[String] = None, dataECC: Option[String] = None, dataECCBytes: Int = 1, nMSHRs: Int = 1, nSDQ: Int = 17, nRPQ: Int = 16, nMMIOs: Int = 1, blockBytes: Int = 64, separateUncachedResp: Boolean = false, acquireBeforeRelease: Boolean = false, pipelineWayMux: Boolean = false, clockGate: Boolean = false, scratch: Option[BigInt] = None) extends L1CacheParams { def tagCode: Code = Code.fromString(tagECC) def dataCode: Code = Code.fromString(dataECC) def dataScratchpadBytes: Int = scratch.map(_ => nSets*blockBytes).getOrElse(0) def replacement = new RandomReplacement(nWays) def silentDrop: Boolean = !acquireBeforeRelease require((!scratch.isDefined || nWays == 1), "Scratchpad only allowed in direct-mapped cache.") require((!scratch.isDefined || nMSHRs == 0), "Scratchpad only allowed in blocking cache.") if (scratch.isEmpty) require(isPow2(nSets), s"nSets($nSets) must be pow2") } trait HasL1HellaCacheParameters extends HasL1CacheParameters with HasCoreParameters { val cacheParams = tileParams.dcache.get val cfg = cacheParams def wordBits = coreDataBits def wordBytes = coreDataBytes def subWordBits = cacheParams.subWordBits.getOrElse(wordBits) def subWordBytes = subWordBits / 8 def wordOffBits = log2Up(wordBytes) def beatBytes = cacheBlockBytes / cacheDataBeats def beatWords = beatBytes / wordBytes def beatOffBits = log2Up(beatBytes) def idxMSB = untagBits-1 def idxLSB = blockOffBits def offsetmsb = idxLSB-1 def offsetlsb = wordOffBits def rowWords = rowBits/wordBits def doNarrowRead = coreDataBits * nWays % rowBits == 0 def eccBytes = cacheParams.dataECCBytes val eccBits = cacheParams.dataECCBytes * 8 val encBits = cacheParams.dataCode.width(eccBits) val encWordBits = encBits * (wordBits / eccBits) def encDataBits = cacheParams.dataCode.width(coreDataBits) // NBDCache only def encRowBits = encDataBits*rowWords def lrscCycles = coreParams.lrscCycles // ISA requires 16-insn LRSC sequences to succeed def lrscBackoff = 3 // disallow LRSC reacquisition briefly def blockProbeAfterGrantCycles = 8 // give the processor some time to issue a request after a grant def nIOMSHRs = cacheParams.nMMIOs def maxUncachedInFlight = cacheParams.nMMIOs def dataScratchpadSize = cacheParams.dataScratchpadBytes require(rowBits >= coreDataBits, s"rowBits($rowBits) < coreDataBits($coreDataBits)") if (!usingDataScratchpad) require(rowBits == cacheDataBits, s"rowBits($rowBits) != cacheDataBits($cacheDataBits)") // would need offset 
addr for puts if data width < xlen require(xLen <= cacheDataBits, s"xLen($xLen) > cacheDataBits($cacheDataBits)") } abstract class L1HellaCacheModule(implicit val p: Parameters) extends Module with HasL1HellaCacheParameters abstract class L1HellaCacheBundle(implicit val p: Parameters) extends ParameterizedBundle()(p) with HasL1HellaCacheParameters /** Bundle definitions for HellaCache interfaces */ trait HasCoreMemOp extends HasL1HellaCacheParameters { val addr = UInt(coreMaxAddrBits.W) val idx = (usingVM && untagBits > pgIdxBits).option(UInt(coreMaxAddrBits.W)) val tag = UInt((coreParams.dcacheReqTagBits + log2Ceil(dcacheArbPorts)).W) val cmd = UInt(M_SZ.W) val size = UInt(log2Ceil(coreDataBytes.log2 + 1).W) val signed = Bool() val dprv = UInt(PRV.SZ.W) val dv = Bool() } trait HasCoreData extends HasCoreParameters { val data = UInt(coreDataBits.W) val mask = UInt(coreDataBytes.W) } class HellaCacheReqInternal(implicit p: Parameters) extends CoreBundle()(p) with HasCoreMemOp { val phys = Bool() val no_resp = Bool() // The dcache may omit generating a response for this request val no_alloc = Bool() val no_xcpt = Bool() } class HellaCacheReq(implicit p: Parameters) extends HellaCacheReqInternal()(p) with HasCoreData class HellaCacheResp(implicit p: Parameters) extends CoreBundle()(p) with HasCoreMemOp with HasCoreData { val replay = Bool() val has_data = Bool() val data_word_bypass = UInt(coreDataBits.W) val data_raw = UInt(coreDataBits.W) val store_data = UInt(coreDataBits.W) } class AlignmentExceptions extends Bundle { val ld = Bool() val st = Bool() } class HellaCacheExceptions extends Bundle { val ma = new AlignmentExceptions val pf = new AlignmentExceptions val gf = new AlignmentExceptions val ae = new AlignmentExceptions } class HellaCacheWriteData(implicit p: Parameters) extends CoreBundle()(p) with HasCoreData class HellaCachePerfEvents extends Bundle { val acquire = Bool() val release = Bool() val grant = Bool() val tlbMiss = Bool() val blocked = Bool() val canAcceptStoreThenLoad = Bool() val canAcceptStoreThenRMW = Bool() val canAcceptLoadThenLoad = Bool() val storeBufferEmptyAfterLoad = Bool() val storeBufferEmptyAfterStore = Bool() } // interface between D$ and processor/DTLB class HellaCacheIO(implicit p: Parameters) extends CoreBundle()(p) { val req = Decoupled(new HellaCacheReq) val s1_kill = Output(Bool()) // kill previous cycle's req val s1_data = Output(new HellaCacheWriteData()) // data for previous cycle's req val s2_nack = Input(Bool()) // req from two cycles ago is rejected val s2_nack_cause_raw = Input(Bool()) // reason for nack is store-load RAW hazard (performance hint) val s2_kill = Output(Bool()) // kill req from two cycles ago val s2_uncached = Input(Bool()) // advisory signal that the access is MMIO val s2_paddr = Input(UInt(paddrBits.W)) // translated address val resp = Flipped(Valid(new HellaCacheResp)) val replay_next = Input(Bool()) val s2_xcpt = Input(new HellaCacheExceptions) val s2_gpa = Input(UInt(vaddrBitsExtended.W)) val s2_gpa_is_pte = Input(Bool()) val uncached_resp = tileParams.dcache.get.separateUncachedResp.option(Flipped(Decoupled(new HellaCacheResp))) val ordered = Input(Bool()) val store_pending = Input(Bool()) // there is a store in a store buffer somewhere val perf = Input(new HellaCachePerfEvents()) val keep_clock_enabled = Output(Bool()) // should D$ avoid clock-gating itself? val clock_enabled = Input(Bool()) // is D$ currently being clocked? 
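  // A rough timing sketch of this request/response protocol, inferred from the
  // signal comments above (illustrative only, not a normative specification):
  // {{{
  //   cycle 0 (s0): req fires (req.valid && req.ready)
  //   cycle 1 (s1): s1_data supplies store data; s1_kill may squash the s0 request
  //   cycle 2 (s2): s2_nack / s2_xcpt report back; resp.valid typically arrives here for loads that hit
  // }}}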
} /** Base classes for Diplomatic TL2 HellaCaches */ abstract class HellaCache(tileId: Int)(implicit p: Parameters) extends LazyModule with HasNonDiplomaticTileParameters { protected val cfg = tileParams.dcache.get protected def cacheClientParameters = cfg.scratch.map(x => Seq()).getOrElse(Seq(TLMasterParameters.v1( name = s"Core ${tileId} DCache", sourceId = IdRange(0, 1 max cfg.nMSHRs), supportsProbe = TransferSizes(cfg.blockBytes, cfg.blockBytes)))) protected def mmioClientParameters = Seq(TLMasterParameters.v1( name = s"Core ${tileId} DCache MMIO", sourceId = IdRange(firstMMIO, firstMMIO + cfg.nMMIOs), requestFifo = true)) def firstMMIO = (cacheClientParameters.map(_.sourceId.end) :+ 0).max val node = TLClientNode(Seq(TLMasterPortParameters.v1( clients = cacheClientParameters ++ mmioClientParameters, minLatency = 1, requestFields = tileParams.core.useVM.option(Seq()).getOrElse(Seq(AMBAProtField()))))) val hartIdSinkNodeOpt = cfg.scratch.map(_ => BundleBridgeSink[UInt]()) val mmioAddressPrefixSinkNodeOpt = cfg.scratch.map(_ => BundleBridgeSink[UInt]()) val module: HellaCacheModule def flushOnFenceI = cfg.scratch.isEmpty && !node.edges.out(0).manager.managers.forall(m => !m.supportsAcquireB || !m.executable || m.regionType >= RegionType.TRACKED || m.regionType <= RegionType.IDEMPOTENT) def canSupportCFlushLine = !usingVM || cfg.blockBytes * cfg.nSets <= (1 << pgIdxBits) require(!tileParams.core.haveCFlush || cfg.scratch.isEmpty, "CFLUSH_D_L1 instruction requires a D$") } class HellaCacheBundle(implicit p: Parameters) extends CoreBundle()(p) { val cpu = Flipped(new HellaCacheIO) val ptw = new TLBPTWIO() val errors = new DCacheErrors val tlb_port = new DCacheTLBPort } class HellaCacheModule(outer: HellaCache) extends LazyModuleImp(outer) with HasL1HellaCacheParameters { implicit val edge: TLEdgeOut = outer.node.edges.out(0) val (tl_out, _) = outer.node.out(0) val io = IO(new HellaCacheBundle) val io_hartid = outer.hartIdSinkNodeOpt.map(_.bundle) val io_mmio_address_prefix = outer.mmioAddressPrefixSinkNodeOpt.map(_.bundle) dontTouch(io.cpu.resp) // Users like to monitor these fields even if the core ignores some signals dontTouch(io.cpu.s1_data) require(rowBits == edge.bundle.dataBits) private val fifoManagers = edge.manager.managers.filter(TLFIFOFixer.allVolatile) fifoManagers.foreach { m => require (m.fifoId == fifoManagers.head.fifoId, s"IOMSHRs must be FIFO for all regions with effects, but HellaCache sees\n"+ s"${m.nodePath.map(_.name)}\nversus\n${fifoManagers.head.nodePath.map(_.name)}") } } /** Support overriding which HellaCache is instantiated */ case object BuildHellaCache extends Field[BaseTile => Parameters => HellaCache](HellaCacheFactory.apply) object HellaCacheFactory { def apply(tile: BaseTile)(p: Parameters): HellaCache = { if (tile.tileParams.dcache.get.nMSHRs == 0) new DCache(tile.tileId, tile.crossing)(p) else new NonBlockingDCache(tile.tileId)(p) } } /** Mix-ins for constructing tiles that have a HellaCache */ trait HasHellaCache { this: BaseTile => val module: HasHellaCacheModule implicit val p: Parameters var nDCachePorts = 0 lazy val dcache: HellaCache = LazyModule(p(BuildHellaCache)(this)(p)) tlMasterXbar.node := TLWidthWidget(tileParams.dcache.get.rowBits/8) := dcache.node dcache.hartIdSinkNodeOpt.map { _ := hartIdNexusNode } dcache.mmioAddressPrefixSinkNodeOpt.map { _ := mmioAddressPrefixNexusNode } InModuleBody { dcache.module.io.tlb_port := DontCare } } trait HasHellaCacheModule { val outer: HasHellaCache with HasTileParameters implicit val p: Parameters val 
dcachePorts = ListBuffer[HellaCacheIO]() val dcacheArb = Module(new HellaCacheArbiter(outer.nDCachePorts)(outer.p)) outer.dcache.module.io.cpu <> dcacheArb.io.mem } /** Metadata array used for all HellaCaches */ class L1Metadata(implicit p: Parameters) extends L1HellaCacheBundle()(p) { val coh = new ClientMetadata val tag = UInt(tagBits.W) } object L1Metadata { def apply(tag: Bits, coh: ClientMetadata)(implicit p: Parameters) = { val meta = Wire(new L1Metadata) meta.tag := tag meta.coh := coh meta } } class L1MetaReadReq(implicit p: Parameters) extends L1HellaCacheBundle()(p) { val idx = UInt(idxBits.W) val way_en = UInt(nWays.W) val tag = UInt(tagBits.W) } class L1MetaWriteReq(implicit p: Parameters) extends L1MetaReadReq()(p) { val data = new L1Metadata } class L1MetadataArray[T <: L1Metadata](onReset: () => T)(implicit p: Parameters) extends L1HellaCacheModule()(p) { val rstVal = onReset() val io = IO(new Bundle { val read = Flipped(Decoupled(new L1MetaReadReq)) val write = Flipped(Decoupled(new L1MetaWriteReq)) val resp = Output(Vec(nWays, rstVal.cloneType)) }) val rst_cnt = RegInit(0.U(log2Up(nSets+1).W)) val rst = rst_cnt < nSets.U val waddr = Mux(rst, rst_cnt, io.write.bits.idx) val wdata = Mux(rst, rstVal, io.write.bits.data).asUInt val wmask = Mux(rst || (nWays == 1).B, (-1).S, io.write.bits.way_en.asSInt).asBools val rmask = Mux(rst || (nWays == 1).B, (-1).S, io.read.bits.way_en.asSInt).asBools when (rst) { rst_cnt := rst_cnt+1.U } val metabits = rstVal.getWidth val tag_array = SyncReadMem(nSets, Vec(nWays, UInt(metabits.W))) val wen = rst || io.write.valid when (wen) { tag_array.write(waddr, VecInit.fill(nWays)(wdata), wmask) } io.resp := tag_array.read(io.read.bits.idx, io.read.fire).map(_.asTypeOf(chiselTypeOf(rstVal))) io.read.ready := !wen // so really this could be a 6T RAM io.write.ready := !rst } File Interrupts.scala: // See LICENSE.SiFive for license details. 
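// Orientation note, derived from the definitions below: the CSR offsets returned by
// csrIntMap must line up with the order in which decodeCoreInterrupts unpacks the
// flat diplomatic interrupt vector (debug, msip, mtip, meip, then seip if supervisor
// mode is present, then the local interrupts). As an illustrative example, with
// supervisor mode enabled and two local interrupts, csrIntMap evaluates to
// List(65535, 3, 7, 11, 9, 16, 17).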
package freechips.rocketchip.tile import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.resources.{Device, DeviceSnippet, Description, ResourceBinding, ResourceInt} import freechips.rocketchip.interrupts.{IntIdentityNode, IntSinkNode, IntSinkPortSimple, IntSourceNode, IntSourcePortSimple} import freechips.rocketchip.util.CanHaveErrors import freechips.rocketchip.resources.{IntToProperty, StringToProperty} import freechips.rocketchip.util.BooleanToAugmentedBoolean class NMI(val w: Int) extends Bundle { val rnmi = Bool() val rnmi_interrupt_vector = UInt(w.W) val rnmi_exception_vector = UInt(w.W) } class TileInterrupts(implicit p: Parameters) extends CoreBundle()(p) { val debug = Bool() val mtip = Bool() val msip = Bool() val meip = Bool() val seip = usingSupervisor.option(Bool()) val lip = Vec(coreParams.nLocalInterrupts, Bool()) val nmi = usingNMI.option(new NMI(resetVectorLen)) } // Use diplomatic interrupts to external interrupts from the subsystem into the tile trait SinksExternalInterrupts { this: BaseTile => val intInwardNode = intXbar.intnode :=* IntIdentityNode()(ValName("int_local")) protected val intSinkNode = IntSinkNode(IntSinkPortSimple()) intSinkNode := intXbar.intnode def cpuDevice: Device val intcDevice = new DeviceSnippet { override def parent = Some(cpuDevice) def describe(): Description = { Description("interrupt-controller", Map( "compatible" -> "riscv,cpu-intc".asProperty, "interrupt-controller" -> Nil, "#interrupt-cells" -> 1.asProperty)) } } ResourceBinding { intSinkNode.edges.in.flatMap(_.source.sources).map { case s => for (i <- s.range.start until s.range.end) { csrIntMap.lift(i).foreach { j => s.resources.foreach { r => r.bind(intcDevice, ResourceInt(j)) } } } } } // TODO: the order of the following two functions must match, and // also match the order which things are connected to the // per-tile crossbar in subsystem.HasTiles.connectInterrupts // debug, msip, mtip, meip, seip, lip offsets in CSRs def csrIntMap: List[Int] = { val nlips = tileParams.core.nLocalInterrupts val seip = if (usingSupervisor) Seq(9) else Nil List(65535, 3, 7, 11) ++ seip ++ List.tabulate(nlips)(_ + 16) } // go from flat diplomatic Interrupts to bundled TileInterrupts def decodeCoreInterrupts(core: TileInterrupts): Unit = { val async_ips = Seq(core.debug) val periph_ips = Seq( core.msip, core.mtip, core.meip) val seip = if (core.seip.isDefined) Seq(core.seip.get) else Nil val core_ips = core.lip val (interrupts, _) = intSinkNode.in(0) (async_ips ++ periph_ips ++ seip ++ core_ips).zip(interrupts).foreach { case(c, i) => c := i } } } trait SourcesExternalNotifications { this: BaseTile => // Report unrecoverable error conditions val haltNode = IntSourceNode(IntSourcePortSimple()) def reportHalt(could_halt: Option[Bool]): Unit = { val (halt_and_catch_fire, _) = haltNode.out(0) halt_and_catch_fire(0) := could_halt.map(RegEnable(true.B, false.B, _)).getOrElse(false.B) } def reportHalt(errors: Seq[CanHaveErrors]): Unit = { reportHalt(errors.flatMap(_.uncorrectable).map(_.valid).reduceOption(_||_)) } // Report when the tile has ceased to retire instructions val ceaseNode = IntSourceNode(IntSourcePortSimple()) def reportCease(could_cease: Option[Bool], quiescenceCycles: Int = 8): Unit = { def waitForQuiescence(cease: Bool): Bool = { // don't report cease until signal is stable for longer than any pipeline depth val count = RegInit(0.U(log2Ceil(quiescenceCycles + 1).W)) val saturated = count >= quiescenceCycles.U 
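      // Illustrative behaviour (with the default quiescenceCycles = 8): cease must be
      // held high for quiescenceCycles consecutive cycles before saturated asserts;
      // any deassertion clears the count, so short glitches never reach the sink.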
when (!cease) { count := 0.U } when (cease && !saturated) { count := count + 1.U } saturated } val (cease, _) = ceaseNode.out(0) cease(0) := could_cease.map{ c => val cease = (waitForQuiescence(c)) // Test-Only Code -- val prev_cease = RegNext(cease, false.B) assert(!(prev_cease & !cease), "CEASE line can not glitch once raised") cease }.getOrElse(false.B) } // Report when the tile is waiting for an interrupt val wfiNode = IntSourceNode(IntSourcePortSimple()) def reportWFI(could_wfi: Option[Bool]): Unit = { val (wfi, _) = wfiNode.out(0) wfi(0) := could_wfi.map(RegNext(_, init=false.B)).getOrElse(false.B) } } File WidthWidget.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.diplomacy.AddressSet import freechips.rocketchip.util.{Repeater, UIntToOH1} // innBeatBytes => the new client-facing bus width class TLWidthWidget(innerBeatBytes: Int)(implicit p: Parameters) extends LazyModule { private def noChangeRequired(manager: TLManagerPortParameters) = manager.beatBytes == innerBeatBytes val node = new TLAdapterNode( clientFn = { case c => c }, managerFn = { case m => m.v1copy(beatBytes = innerBeatBytes) }){ override def circuitIdentity = edges.out.map(_.manager).forall(noChangeRequired) } override lazy val desiredName = s"TLWidthWidget$innerBeatBytes" lazy val module = new Impl class Impl extends LazyModuleImp(this) { def merge[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T]) = { val inBytes = edgeIn.manager.beatBytes val outBytes = edgeOut.manager.beatBytes val ratio = outBytes / inBytes val keepBits = log2Ceil(outBytes) val dropBits = log2Ceil(inBytes) val countBits = log2Ceil(ratio) val size = edgeIn.size(in.bits) val hasData = edgeIn.hasData(in.bits) val limit = UIntToOH1(size, keepBits) >> dropBits val count = RegInit(0.U(countBits.W)) val first = count === 0.U val last = count === limit || !hasData val enable = Seq.tabulate(ratio) { i => !((count ^ i.U) & limit).orR } val corrupt_reg = RegInit(false.B) val corrupt_in = edgeIn.corrupt(in.bits) val corrupt_out = corrupt_in || corrupt_reg when (in.fire) { count := count + 1.U corrupt_reg := corrupt_out when (last) { count := 0.U corrupt_reg := false.B } } def helper(idata: UInt): UInt = { // rdata is X until the first time a multi-beat write occurs. // Prevent the X from leaking outside by jamming the mux control until // the first time rdata is written (and hence no longer X). 
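        // Illustrative sketch, assuming for example inBytes = 4 and outBytes = 8 (ratio = 2):
        // beat 0 of a two-beat transfer is captured into rdata(0); on the final beat the
        // registered beat and the live idata are concatenated into one outer-width beat.
        // Until rdata has been written at least once, masked_enable forces every mux
        // toward the live input so the uninitialised rdata (X) can never reach out.bits.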
val rdata_written_once = RegInit(false.B) val masked_enable = enable.map(_ || !rdata_written_once) val odata = Seq.fill(ratio) { WireInit(idata) } val rdata = Reg(Vec(ratio-1, chiselTypeOf(idata))) val pdata = rdata :+ idata val mdata = (masked_enable zip (odata zip pdata)) map { case (e, (o, p)) => Mux(e, o, p) } when (in.fire && !last) { rdata_written_once := true.B (rdata zip mdata) foreach { case (r, m) => r := m } } Cat(mdata.reverse) } in.ready := out.ready || !last out.valid := in.valid && last out.bits := in.bits // Don't put down hardware if we never carry data edgeOut.data(out.bits) := (if (edgeIn.staticHasData(in.bits) == Some(false)) 0.U else helper(edgeIn.data(in.bits))) edgeOut.corrupt(out.bits) := corrupt_out (out.bits, in.bits) match { case (o: TLBundleA, i: TLBundleA) => o.mask := edgeOut.mask(o.address, o.size) & Mux(hasData, helper(i.mask), ~0.U(outBytes.W)) case (o: TLBundleB, i: TLBundleB) => o.mask := edgeOut.mask(o.address, o.size) & Mux(hasData, helper(i.mask), ~0.U(outBytes.W)) case (o: TLBundleC, i: TLBundleC) => () case (o: TLBundleD, i: TLBundleD) => () case _ => require(false, "Impossible bundle combination in WidthWidget") } } def split[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T], sourceMap: UInt => UInt) = { val inBytes = edgeIn.manager.beatBytes val outBytes = edgeOut.manager.beatBytes val ratio = inBytes / outBytes val keepBits = log2Ceil(inBytes) val dropBits = log2Ceil(outBytes) val countBits = log2Ceil(ratio) val size = edgeIn.size(in.bits) val hasData = edgeIn.hasData(in.bits) val limit = UIntToOH1(size, keepBits) >> dropBits val count = RegInit(0.U(countBits.W)) val first = count === 0.U val last = count === limit || !hasData when (out.fire) { count := count + 1.U when (last) { count := 0.U } } // For sub-beat transfer, extract which part matters val sel = in.bits match { case a: TLBundleA => a.address(keepBits-1, dropBits) case b: TLBundleB => b.address(keepBits-1, dropBits) case c: TLBundleC => c.address(keepBits-1, dropBits) case d: TLBundleD => { val sel = sourceMap(d.source) val hold = Mux(first, sel, RegEnable(sel, first)) // a_first is not for whole xfer hold & ~limit // if more than one a_first/xfer, the address must be aligned anyway } } val index = sel | count def helper(idata: UInt, width: Int): UInt = { val mux = VecInit.tabulate(ratio) { i => idata((i+1)*outBytes*width-1, i*outBytes*width) } mux(index) } out.bits := in.bits out.valid := in.valid in.ready := out.ready // Don't put down hardware if we never carry data edgeOut.data(out.bits) := (if (edgeIn.staticHasData(in.bits) == Some(false)) 0.U else helper(edgeIn.data(in.bits), 8)) (out.bits, in.bits) match { case (o: TLBundleA, i: TLBundleA) => o.mask := helper(i.mask, 1) case (o: TLBundleB, i: TLBundleB) => o.mask := helper(i.mask, 1) case (o: TLBundleC, i: TLBundleC) => () // replicating corrupt to all beats is ok case (o: TLBundleD, i: TLBundleD) => () case _ => require(false, "Impossbile bundle combination in WidthWidget") } // Repeat the input if we're not last !last } def splice[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T], sourceMap: UInt => UInt) = { if (edgeIn.manager.beatBytes == edgeOut.manager.beatBytes) { // nothing to do; pass it through out.bits := in.bits out.valid := in.valid in.ready := out.ready } else if (edgeIn.manager.beatBytes > edgeOut.manager.beatBytes) { // split input to output val repeat = Wire(Bool()) val repeated = Repeater(in, repeat) val cated = 
Wire(chiselTypeOf(repeated)) cated <> repeated edgeIn.data(cated.bits) := Cat( edgeIn.data(repeated.bits)(edgeIn.manager.beatBytes*8-1, edgeOut.manager.beatBytes*8), edgeIn.data(in.bits)(edgeOut.manager.beatBytes*8-1, 0)) repeat := split(edgeIn, cated, edgeOut, out, sourceMap) } else { // merge input to output merge(edgeIn, in, edgeOut, out) } } (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => // If the master is narrower than the slave, the D channel must be narrowed. // This is tricky, because the D channel has no address data. // Thus, you don't know which part of a sub-beat transfer to extract. // To fix this, we record the relevant address bits for all sources. // The assumption is that this sort of situation happens only where // you connect a narrow master to the system bus, so there are few sources. def sourceMap(source_bits: UInt) = { val source = if (edgeIn.client.endSourceId == 1) 0.U(0.W) else source_bits require (edgeOut.manager.beatBytes > edgeIn.manager.beatBytes) val keepBits = log2Ceil(edgeOut.manager.beatBytes) val dropBits = log2Ceil(edgeIn.manager.beatBytes) val sources = Reg(Vec(edgeIn.client.endSourceId, UInt((keepBits-dropBits).W))) val a_sel = in.a.bits.address(keepBits-1, dropBits) when (in.a.fire) { if (edgeIn.client.endSourceId == 1) { // avoid extraction-index-width warning sources(0) := a_sel } else { sources(in.a.bits.source) := a_sel } } // depopulate unused source registers: edgeIn.client.unusedSources.foreach { id => sources(id) := 0.U } val bypass = in.a.valid && in.a.bits.source === source if (edgeIn.manager.minLatency > 0) sources(source) else Mux(bypass, a_sel, sources(source)) } splice(edgeIn, in.a, edgeOut, out.a, sourceMap) splice(edgeOut, out.d, edgeIn, in.d, sourceMap) if (edgeOut.manager.anySupportAcquireB && edgeIn.client.anySupportProbe) { splice(edgeOut, out.b, edgeIn, in.b, sourceMap) splice(edgeIn, in.c, edgeOut, out.c, sourceMap) out.e.valid := in.e.valid out.e.bits := in.e.bits in.e.ready := out.e.ready } else { in.b.valid := false.B in.c.ready := true.B in.e.ready := true.B out.b.ready := true.B out.c.valid := false.B out.e.valid := false.B } } } } object TLWidthWidget { def apply(innerBeatBytes: Int)(implicit p: Parameters): TLNode = { val widget = LazyModule(new TLWidthWidget(innerBeatBytes)) widget.node } def apply(wrapper: TLBusWrapper)(implicit p: Parameters): TLNode = apply(wrapper.beatBytes) } // Synthesizable unit tests import freechips.rocketchip.unittest._ class TLRAMWidthWidget(first: Int, second: Int, txns: Int)(implicit p: Parameters) extends LazyModule { val fuzz = LazyModule(new TLFuzzer(txns)) val model = LazyModule(new TLRAMModel("WidthWidget")) val ram = LazyModule(new TLRAM(AddressSet(0x0, 0x3ff))) (ram.node := TLDelayer(0.1) := TLFragmenter(4, 256) := TLWidthWidget(second) := TLWidthWidget(first) := TLDelayer(0.1) := model.node := fuzz.node) lazy val module = new Impl class Impl extends LazyModuleImp(this) with UnitTestModule { io.finished := fuzz.module.io.finished } } class TLRAMWidthWidgetTest(little: Int, big: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) { val dut = Module(LazyModule(new TLRAMWidthWidget(little,big,txns)).module) dut.io.start := DontCare io.finished := dut.io.finished } File Frontend.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. 
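// A rough sketch of the fetch pipeline implemented in FrontendModule below (inferred
// from the s0/s1/s2 signals in the code, illustrative only):
// {{{
//   s0: next PC chosen (io.cpu.npc, BTB prediction, or replay) and sent to the I$ and ITLB
//   s1: ITLB lookup; s1_kill on redirect, TLB miss, or replay
//   s2: I$ response checked, RVC/branch scan, and enqueue into the 5-entry ShiftQueue (fq)
// }}}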
package freechips.rocketchip.rocket import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.bundlebridge._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.tile.{CoreBundle, BaseTile} import freechips.rocketchip.tilelink.{TLWidthWidget, TLEdgeOut} import freechips.rocketchip.util.{ClockGate, ShiftQueue, property} import freechips.rocketchip.util.UIntToAugmentedUInt class FrontendReq(implicit p: Parameters) extends CoreBundle()(p) { val pc = UInt(vaddrBitsExtended.W) val speculative = Bool() } class FrontendExceptions extends Bundle { val pf = new Bundle { val inst = Bool() } val gf = new Bundle { val inst = Bool() } val ae = new Bundle { val inst = Bool() } } class FrontendResp(implicit p: Parameters) extends CoreBundle()(p) { val btb = new BTBResp val pc = UInt(vaddrBitsExtended.W) // ID stage PC val data = UInt((fetchWidth * coreInstBits).W) val mask = Bits(fetchWidth.W) val xcpt = new FrontendExceptions val replay = Bool() } class FrontendPerfEvents extends Bundle { val acquire = Bool() val tlbMiss = Bool() } class FrontendIO(implicit p: Parameters) extends CoreBundle()(p) { val might_request = Output(Bool()) val clock_enabled = Input(Bool()) val req = Valid(new FrontendReq) val sfence = Valid(new SFenceReq) val resp = Flipped(Decoupled(new FrontendResp)) val gpa = Flipped(Valid(UInt(vaddrBitsExtended.W))) val gpa_is_pte = Input(Bool()) val btb_update = Valid(new BTBUpdate) val bht_update = Valid(new BHTUpdate) val ras_update = Valid(new RASUpdate) val flush_icache = Output(Bool()) val npc = Input(UInt(vaddrBitsExtended.W)) val perf = Input(new FrontendPerfEvents()) val progress = Output(Bool()) } class Frontend(val icacheParams: ICacheParams, tileId: Int)(implicit p: Parameters) extends LazyModule { lazy val module = new FrontendModule(this) val icache = LazyModule(new ICache(icacheParams, tileId)) val masterNode = icache.masterNode val slaveNode = icache.slaveNode val resetVectorSinkNode = BundleBridgeSink[UInt](Some(() => UInt(masterNode.edges.out.head.bundle.addressBits.W))) } class FrontendBundle(val outer: Frontend) extends CoreBundle()(outer.p) { val cpu = Flipped(new FrontendIO()) val ptw = new TLBPTWIO() val errors = new ICacheErrors } class FrontendModule(outer: Frontend) extends LazyModuleImp(outer) with HasRocketCoreParameters with HasL1ICacheParameters { val io = IO(new FrontendBundle(outer)) val io_reset_vector = outer.resetVectorSinkNode.bundle implicit val edge: TLEdgeOut = outer.masterNode.edges.out(0) val icache = outer.icache.module require(fetchWidth*coreInstBytes == outer.icacheParams.fetchBytes) val fq = withReset(reset.asBool || io.cpu.req.valid) { Module(new ShiftQueue(new FrontendResp, 5, flow = true)) } val clock_en_reg = Reg(Bool()) val clock_en = clock_en_reg || io.cpu.might_request io.cpu.clock_enabled := clock_en assert(!(io.cpu.req.valid || io.cpu.sfence.valid || io.cpu.flush_icache || io.cpu.bht_update.valid || io.cpu.btb_update.valid) || io.cpu.might_request) val gated_clock = if (!rocketParams.clockGate) clock else ClockGate(clock, clock_en, "icache_clock_gate") icache.clock := gated_clock icache.io.clock_enabled := clock_en withClock (gated_clock) { // entering gated-clock domain val tlb = Module(new TLB(true, log2Ceil(fetchBytes), TLBConfig(nTLBSets, nTLBWays, outer.icacheParams.nTLBBasePageSectors, outer.icacheParams.nTLBSuperpages))) val s1_valid = Reg(Bool()) val s2_valid = RegInit(false.B) val s0_fq_has_space = 
!fq.io.mask(fq.io.mask.getWidth-3) || (!fq.io.mask(fq.io.mask.getWidth-2) && (!s1_valid || !s2_valid)) || (!fq.io.mask(fq.io.mask.getWidth-1) && (!s1_valid && !s2_valid)) val s0_valid = io.cpu.req.valid || s0_fq_has_space s1_valid := s0_valid val s1_pc = Reg(UInt(vaddrBitsExtended.W)) val s1_speculative = Reg(Bool()) val s2_pc = RegInit(t = UInt(vaddrBitsExtended.W), alignPC(io_reset_vector)) val s2_btb_resp_valid = if (usingBTB) Reg(Bool()) else false.B val s2_btb_resp_bits = Reg(new BTBResp) val s2_btb_taken = s2_btb_resp_valid && s2_btb_resp_bits.taken val s2_tlb_resp = Reg(tlb.io.resp.cloneType) val s2_xcpt = s2_tlb_resp.ae.inst || s2_tlb_resp.pf.inst || s2_tlb_resp.gf.inst val s2_speculative = RegInit(false.B) val s2_partial_insn_valid = RegInit(false.B) val s2_partial_insn = Reg(UInt(coreInstBits.W)) val wrong_path = RegInit(false.B) val s1_base_pc = ~(~s1_pc | (fetchBytes - 1).U) val ntpc = s1_base_pc + fetchBytes.U val predicted_npc = WireDefault(ntpc) val predicted_taken = WireDefault(false.B) val s2_replay = Wire(Bool()) s2_replay := (s2_valid && !fq.io.enq.fire) || RegNext(s2_replay && !s0_valid, true.B) val npc = Mux(s2_replay, s2_pc, predicted_npc) s1_pc := io.cpu.npc // consider RVC fetches across blocks to be non-speculative if the first // part was non-speculative val s0_speculative = if (usingCompressed) s1_speculative || s2_valid && !s2_speculative || predicted_taken else true.B s1_speculative := Mux(io.cpu.req.valid, io.cpu.req.bits.speculative, Mux(s2_replay, s2_speculative, s0_speculative)) val s2_redirect = WireDefault(io.cpu.req.valid) s2_valid := false.B when (!s2_replay) { s2_valid := !s2_redirect s2_pc := s1_pc s2_speculative := s1_speculative s2_tlb_resp := tlb.io.resp } val recent_progress_counter_init = 3.U val recent_progress_counter = RegInit(recent_progress_counter_init) val recent_progress = recent_progress_counter > 0.U when(io.ptw.req.fire && recent_progress) { recent_progress_counter := recent_progress_counter - 1.U } when(io.cpu.progress) { recent_progress_counter := recent_progress_counter_init } val s2_kill_speculative_tlb_refill = s2_speculative && !recent_progress io.ptw <> tlb.io.ptw tlb.io.req.valid := s1_valid && !s2_replay tlb.io.req.bits.cmd := M_XRD // Frontend only reads tlb.io.req.bits.vaddr := s1_pc tlb.io.req.bits.passthrough := false.B tlb.io.req.bits.size := log2Ceil(coreInstBytes*fetchWidth).U tlb.io.req.bits.prv := io.ptw.status.prv tlb.io.req.bits.v := io.ptw.status.v tlb.io.sfence := io.cpu.sfence tlb.io.kill := !s2_valid || s2_kill_speculative_tlb_refill icache.io.req.valid := s0_valid icache.io.req.bits.addr := io.cpu.npc icache.io.invalidate := io.cpu.flush_icache icache.io.s1_paddr := tlb.io.resp.paddr icache.io.s2_vaddr := s2_pc icache.io.s1_kill := s2_redirect || tlb.io.resp.miss || s2_replay val s2_can_speculatively_refill = s2_tlb_resp.cacheable && !io.ptw.customCSRs.asInstanceOf[RocketCustomCSRs].disableSpeculativeICacheRefill icache.io.s2_kill := s2_speculative && !s2_can_speculatively_refill || s2_xcpt icache.io.s2_cacheable := s2_tlb_resp.cacheable icache.io.s2_prefetch := s2_tlb_resp.prefetchable && !io.ptw.customCSRs.asInstanceOf[RocketCustomCSRs].disableICachePrefetch fq.io.enq.valid := RegNext(s1_valid) && s2_valid && (icache.io.resp.valid || (s2_kill_speculative_tlb_refill && s2_tlb_resp.miss) || (!s2_tlb_resp.miss && icache.io.s2_kill)) fq.io.enq.bits.pc := s2_pc io.cpu.npc := alignPC(Mux(io.cpu.req.valid, io.cpu.req.bits.pc, npc)) fq.io.enq.bits.data := icache.io.resp.bits.data fq.io.enq.bits.mask := ((1 << 
fetchWidth)-1).U << s2_pc.extract(log2Ceil(fetchWidth)+log2Ceil(coreInstBytes)-1, log2Ceil(coreInstBytes)) fq.io.enq.bits.replay := (icache.io.resp.bits.replay || icache.io.s2_kill && !icache.io.resp.valid && !s2_xcpt) || (s2_kill_speculative_tlb_refill && s2_tlb_resp.miss) fq.io.enq.bits.btb := s2_btb_resp_bits fq.io.enq.bits.btb.taken := s2_btb_taken fq.io.enq.bits.xcpt := s2_tlb_resp assert(!(s2_speculative && io.ptw.customCSRs.asInstanceOf[RocketCustomCSRs].disableSpeculativeICacheRefill && !icache.io.s2_kill)) when (icache.io.resp.valid && icache.io.resp.bits.ae) { fq.io.enq.bits.xcpt.ae.inst := true.B } if (usingBTB) { val btb = Module(new BTB) btb.io.flush := false.B btb.io.req.valid := false.B btb.io.req.bits.addr := s1_pc btb.io.btb_update := io.cpu.btb_update btb.io.bht_update := io.cpu.bht_update btb.io.ras_update.valid := false.B btb.io.ras_update.bits := DontCare btb.io.bht_advance.valid := false.B btb.io.bht_advance.bits := DontCare when (!s2_replay) { btb.io.req.valid := !s2_redirect s2_btb_resp_valid := btb.io.resp.valid s2_btb_resp_bits := btb.io.resp.bits } when (btb.io.resp.valid && btb.io.resp.bits.taken) { predicted_npc := btb.io.resp.bits.target.sextTo(vaddrBitsExtended) predicted_taken := true.B } val force_taken = io.ptw.customCSRs.bpmStatic when (io.ptw.customCSRs.flushBTB) { btb.io.flush := true.B } when (force_taken) { btb.io.bht_update.valid := false.B } val s2_base_pc = ~(~s2_pc | (fetchBytes-1).U) val taken_idx = Wire(UInt()) val after_idx = Wire(UInt()) val useRAS = WireDefault(false.B) val updateBTB = WireDefault(false.B) // If !prevTaken, ras_update / bht_update is always invalid. taken_idx := DontCare after_idx := DontCare def scanInsns(idx: Int, prevValid: Bool, prevBits: UInt, prevTaken: Bool): Bool = { def insnIsRVC(bits: UInt) = bits(1,0) =/= 3.U val prevRVI = prevValid && !insnIsRVC(prevBits) val valid = fq.io.enq.bits.mask(idx) && !prevRVI val bits = fq.io.enq.bits.data(coreInstBits*(idx+1)-1, coreInstBits*idx) val rvc = insnIsRVC(bits) val rviBits = Cat(bits, prevBits) val rviBranch = rviBits(6,0) === Instructions.BEQ.value.U.extract(6,0) val rviJump = rviBits(6,0) === Instructions.JAL.value.U.extract(6,0) val rviJALR = rviBits(6,0) === Instructions.JALR.value.U.extract(6,0) val rviReturn = rviJALR && !rviBits(7) && BitPat("b00?01") === rviBits(19,15) val rviCall = (rviJALR || rviJump) && rviBits(7) val rvcBranch = bits === Instructions.C_BEQZ || bits === Instructions.C_BNEZ val rvcJAL = (xLen == 32).B && bits === Instructions32.C_JAL val rvcJump = bits === Instructions.C_J || rvcJAL val rvcImm = Mux(bits(14), new RVCDecoder(bits, xLen, fLen).bImm.asSInt, new RVCDecoder(bits, xLen, fLen).jImm.asSInt) val rvcJR = bits === Instructions.C_MV && bits(6,2) === 0.U val rvcReturn = rvcJR && BitPat("b00?01") === bits(11,7) val rvcJALR = bits === Instructions.C_ADD && bits(6,2) === 0.U val rvcCall = rvcJAL || rvcJALR val rviImm = Mux(rviBits(3), ImmGen(IMM_UJ, rviBits), ImmGen(IMM_SB, rviBits)) val predict_taken = s2_btb_resp_bits.bht.taken || force_taken val taken = prevRVI && (rviJump || rviJALR || rviBranch && predict_taken) || valid && (rvcJump || rvcJALR || rvcJR || rvcBranch && predict_taken) val predictReturn = btb.io.ras_head.valid && (prevRVI && rviReturn || valid && rvcReturn) val predictJump = prevRVI && rviJump || valid && rvcJump val predictBranch = predict_taken && (prevRVI && rviBranch || valid && rvcBranch) when (s2_valid && s2_btb_resp_valid && s2_btb_resp_bits.bridx === idx.U && valid && !rvc) { // The BTB has predicted that the middle of 
an RVI instruction is // a branch! Flush the BTB and the pipeline. btb.io.flush := true.B fq.io.enq.bits.replay := true.B wrong_path := true.B ccover(wrong_path, "BTB_NON_CFI_ON_WRONG_PATH", "BTB predicted a non-branch was taken while on the wrong path") } when (!prevTaken) { taken_idx := idx.U after_idx := (idx + 1).U btb.io.ras_update.valid := fq.io.enq.fire && !wrong_path && (prevRVI && (rviCall || rviReturn) || valid && (rvcCall || rvcReturn)) btb.io.ras_update.bits.cfiType := Mux(Mux(prevRVI, rviReturn, rvcReturn), CFIType.ret, Mux(Mux(prevRVI, rviCall, rvcCall), CFIType.call, Mux(Mux(prevRVI, rviBranch, rvcBranch) && !force_taken, CFIType.branch, CFIType.jump))) when (!s2_btb_taken) { when (fq.io.enq.fire && taken && !predictBranch && !predictJump && !predictReturn) { wrong_path := true.B } when (s2_valid && predictReturn) { useRAS := true.B } when (s2_valid && (predictBranch || predictJump)) { val pc = s2_base_pc | (idx*coreInstBytes).U val npc = if (idx == 0) pc.asSInt + Mux(prevRVI, rviImm -& 2.S, rvcImm) else Mux(prevRVI, pc - coreInstBytes.U, pc).asSInt + Mux(prevRVI, rviImm, rvcImm) predicted_npc := npc.asUInt } } when (prevRVI && rviBranch || valid && rvcBranch) { btb.io.bht_advance.valid := fq.io.enq.fire && !wrong_path btb.io.bht_advance.bits := s2_btb_resp_bits } when (!s2_btb_resp_valid && (predictBranch && s2_btb_resp_bits.bht.strongly_taken || predictJump || predictReturn)) { updateBTB := true.B } } if (idx == fetchWidth-1) { when (fq.io.enq.fire) { s2_partial_insn_valid := false.B when (valid && !prevTaken && !rvc) { s2_partial_insn_valid := true.B s2_partial_insn := bits | 0x3.U } } prevTaken || taken } else { scanInsns(idx + 1, valid, bits, prevTaken || taken) } } when (!io.cpu.btb_update.valid) { val fetch_bubble_likely = !fq.io.mask(1) btb.io.btb_update.valid := fq.io.enq.fire && !wrong_path && fetch_bubble_likely && updateBTB btb.io.btb_update.bits.prediction.entry := tileParams.btb.get.nEntries.U btb.io.btb_update.bits.isValid := true.B btb.io.btb_update.bits.cfiType := btb.io.ras_update.bits.cfiType btb.io.btb_update.bits.br_pc := s2_base_pc | (taken_idx << log2Ceil(coreInstBytes)) btb.io.btb_update.bits.pc := s2_base_pc } btb.io.ras_update.bits.returnAddr := s2_base_pc + (after_idx << log2Ceil(coreInstBytes)) val taken = scanInsns(0, s2_partial_insn_valid, s2_partial_insn, false.B) when (useRAS) { predicted_npc := btb.io.ras_head.bits } when (fq.io.enq.fire && (s2_btb_taken || taken)) { s2_partial_insn_valid := false.B } when (!s2_btb_taken) { when (taken) { fq.io.enq.bits.btb.bridx := taken_idx fq.io.enq.bits.btb.taken := true.B fq.io.enq.bits.btb.entry := tileParams.btb.get.nEntries.U when (fq.io.enq.fire) { s2_redirect := true.B } } } assert(!s2_partial_insn_valid || fq.io.enq.bits.mask(0)) when (s2_redirect) { s2_partial_insn_valid := false.B } when (io.cpu.req.valid) { wrong_path := false.B } } io.cpu.resp <> fq.io.deq // supply guest physical address to commit stage val gpa_valid = Reg(Bool()) val gpa = Reg(UInt(vaddrBitsExtended.W)) val gpa_is_pte = Reg(Bool()) when (fq.io.enq.fire && s2_tlb_resp.gf.inst) { when (!gpa_valid) { gpa := s2_tlb_resp.gpa gpa_is_pte := s2_tlb_resp.gpa_is_pte } gpa_valid := true.B } when (io.cpu.req.valid) { gpa_valid := false.B } io.cpu.gpa.valid := gpa_valid io.cpu.gpa.bits := gpa io.cpu.gpa_is_pte := gpa_is_pte // performance events io.cpu.perf.acquire := icache.io.perf.acquire io.cpu.perf.tlbMiss := io.ptw.req.fire io.errors := icache.io.errors // gate the clock clock_en_reg := !rocketParams.clockGate.B || 
io.cpu.might_request || // chicken bit icache.io.keep_clock_enabled || // I$ miss or ITIM access s1_valid || s2_valid || // some fetch in flight !tlb.io.req.ready || // handling TLB miss !fq.io.mask(fq.io.mask.getWidth-1) // queue not full } // leaving gated-clock domain def alignPC(pc: UInt) = ~(~pc | (coreInstBytes - 1).U) def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) = property.cover(cond, s"FRONTEND_$label", "Rocket;;" + desc) } /** Mix-ins for constructing tiles that have an ICache-based pipeline frontend */ trait HasICacheFrontend extends CanHavePTW { this: BaseTile => val module: HasICacheFrontendModule val frontend = LazyModule(new Frontend(tileParams.icache.get, tileId)) tlMasterXbar.node := TLWidthWidget(tileParams.icache.get.rowBits/8) := frontend.masterNode connectTLSlave(frontend.slaveNode, tileParams.core.fetchBytes) frontend.icache.hartIdSinkNodeOpt.foreach { _ := hartIdNexusNode } frontend.icache.mmioAddressPrefixSinkNodeOpt.foreach { _ := mmioAddressPrefixNexusNode } frontend.resetVectorSinkNode := resetVectorNexusNode nPTWPorts += 1 // This should be a None in the case of not having an ITIM address, when we // don't actually use the device that is instantiated in the frontend. private val deviceOpt = if (tileParams.icache.get.itimAddr.isDefined) Some(frontend.icache.device) else None } trait HasICacheFrontendModule extends CanHavePTWModule { val outer: HasICacheFrontend ptwPorts += outer.frontend.module.io.ptw } File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. 
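    // In short (summarising the steps below): [[Dangle]]s collected from children and
    // from this module's nodes are grouped by source; any pair sharing a source is
    // connected internally, and the unmatched remainder is punched out as the
    // [[AutoBundle]] `auto` IO and handed up to the parent [[LazyModule]].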
val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. 
*/ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File PTW.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.rocket import chisel3._ import chisel3.util.{Arbiter, Cat, Decoupled, Enum, Mux1H, OHToUInt, PopCount, PriorityEncoder, PriorityEncoderOH, RegEnable, UIntToOH, Valid, is, isPow2, log2Ceil, switch} import chisel3.withClock import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.subsystem.CacheBlockBytes import freechips.rocketchip.tile._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.util._ import freechips.rocketchip.util.property import scala.collection.mutable.ListBuffer /** PTE request from TLB to PTW * * TLB send a PTE request to PTW when L1TLB miss */ class PTWReq(implicit p: Parameters) extends CoreBundle()(p) { val addr = UInt(vpnBits.W) val need_gpa = Bool() val vstage1 = Bool() val stage2 = Bool() } /** PTE info from L2TLB to TLB * * containing: target PTE, exceptions, two-satge tanslation info */ class PTWResp(implicit p: Parameters) extends CoreBundle()(p) { /** ptw access exception */ val ae_ptw = Bool() /** final access exception */ val ae_final = Bool() /** page fault */ val pf = Bool() /** guest page fault */ val gf = Bool() /** hypervisor read */ val hr = Bool() /** hypervisor write */ val hw = Bool() /** hypervisor execute */ val hx = Bool() /** PTE to refill L1TLB * * source: L2TLB */ val pte = new PTE /** pte pglevel */ val level = UInt(log2Ceil(pgLevels).W) /** fragmented_superpage support */ val fragmented_superpage = Bool() /** homogeneous for both pma and pmp */ val homogeneous = Bool() val gpa = Valid(UInt(vaddrBits.W)) val gpa_is_pte = Bool() } /** IO between TLB and PTW * * PTW receives : * - PTE request * - CSRs info * - pmp results from PMP(in TLB) */ class TLBPTWIO(implicit p: Parameters) extends CoreBundle()(p) with HasCoreParameters { val req = Decoupled(Valid(new PTWReq)) val resp = Flipped(Valid(new PTWResp)) val ptbr = Input(new PTBR()) val hgatp = Input(new PTBR()) val vsatp = Input(new PTBR()) val status = Input(new MStatus()) val hstatus = Input(new HStatus()) val gstatus = Input(new MStatus()) val pmp = Input(Vec(nPMPs, new PMP)) val customCSRs = Flipped(coreParams.customCSRs) } /** PTW performance statistics */ class PTWPerfEvents extends Bundle { val l2miss = Bool() val l2hit = Bool() val pte_miss = Bool() val pte_hit = Bool() } /** Datapath IO between PTW 
 * and Core
 *
 * PTW receives CSRs info, PMP checks, and sfence instruction info.
 *
 * PTW sends its performance statistics to the core.
 */
class DatapathPTWIO(implicit p: Parameters) extends CoreBundle()(p)
    with HasCoreParameters {
  val ptbr = Input(new PTBR())
  val hgatp = Input(new PTBR())
  val vsatp = Input(new PTBR())
  val sfence = Flipped(Valid(new SFenceReq))
  val status = Input(new MStatus())
  val hstatus = Input(new HStatus())
  val gstatus = Input(new MStatus())
  val pmp = Input(Vec(nPMPs, new PMP))
  val perf = Output(new PTWPerfEvents())
  val customCSRs = Flipped(coreParams.customCSRs)

  /** enable clock generated by ptw */
  val clock_enabled = Output(Bool())
}

/** PTE template for transmission
 *
 * contains useful methods to check PTE attributes
 * @see RV-priv spec 4.3.1 for page table entry format
 */
class PTE(implicit p: Parameters) extends CoreBundle()(p) {
  val reserved_for_future = UInt(10.W)
  val ppn = UInt(44.W)
  val reserved_for_software = Bits(2.W)
  /** dirty bit */
  val d = Bool()
  /** access bit */
  val a = Bool()
  /** global mapping */
  val g = Bool()
  /** user mode accessible */
  val u = Bool()
  /** whether the page is executable */
  val x = Bool()
  /** whether the page is writable */
  val w = Bool()
  /** whether the page is readable */
  val r = Bool()
  /** valid bit */
  val v = Bool()

  /** return true if this PTE is a pointer to the next-level page table */
  def table(dummy: Int = 0) = v && !r && !w && !x && !d && !a && !u && reserved_for_future === 0.U
  /** return true if this PTE is a leaf PTE */
  def leaf(dummy: Int = 0) = v && (r || (x && !w)) && a
  /** user read */
  def ur(dummy: Int = 0) = sr() && u
  /** user write */
  def uw(dummy: Int = 0) = sw() && u
  /** user execute */
  def ux(dummy: Int = 0) = sx() && u
  /** supervisor read */
  def sr(dummy: Int = 0) = leaf() && r
  /** supervisor write */
  def sw(dummy: Int = 0) = leaf() && w && d
  /** supervisor execute */
  def sx(dummy: Int = 0) = leaf() && x
  /** full permission: writable and executable in user mode */
  def isFullPerm(dummy: Int = 0) = uw() && ux()
}

/** L2TLB PTE template
 *
 * contains tag bits
 * @param nSets number of sets in L2TLB
 * @see RV-priv spec 4.3.1 for page table entry format
 */
class L2TLBEntry(nSets: Int)(implicit p: Parameters) extends CoreBundle()(p)
    with HasCoreParameters {
  val idxBits = log2Ceil(nSets)
  val tagBits = maxSVAddrBits - pgIdxBits - idxBits + (if (usingHypervisor) 1 else 0)
  val tag = UInt(tagBits.W)
  val ppn = UInt(ppnBits.W)
  /** dirty bit */
  val d = Bool()
  /** access bit */
  val a = Bool()
  /** user mode accessible */
  val u = Bool()
  /** whether the page is executable */
  val x = Bool()
  /** whether the page is writable */
  val w = Bool()
  /** whether the page is readable */
  val r = Bool()
}

/** PTW contains the L2TLB, performs page table walks for the higher-level TLBs, and serves queries from the L1 TLBs (I$, D$, RoCC)
 *
 * It performs hierarchical page table queries to mem for the desired leaf PTE and caches them in the l2tlb.
 * Besides leaf PTEs, it also caches non-leaf PTEs in pte_cache to accelerate the process.
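 *
 * As a rough worked example (assuming Sv39: pgLevels = 3, pgLevelBits = 9, xLen = 64,
 * no hypervisor), each walk step issues a memory read at
 * {{{
 *   pte_addr = ((r_pte.ppn << pgLevelBits) | vpn_idx) << log2Ceil(xLen / 8)
 * }}}
 * where vpn_idx is the 9-bit VPN slice for the current level, so the first access lands
 * at satp.ppn * 4096 + vpn[2] * 8; a pte_cache hit simply bumps the level and skips that access.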
* * ==Structure== * - l2tlb : for leaf PTEs * - set-associative (configurable with [[CoreParams.nL2TLBEntries]]and [[CoreParams.nL2TLBWays]])) * - PLRU * - pte_cache: for non-leaf PTEs * - set-associative * - LRU * - s2_pte_cache: for non-leaf PTEs in 2-stage translation * - set-associative * - PLRU * * l2tlb Pipeline: 3 stage * {{{ * stage 0 : read * stage 1 : decode * stage 2 : hit check * }}} * ==State Machine== * s_ready: ready to reveive request from TLB * s_req: request mem; pte_cache hit judge * s_wait1: deal with l2tlb error * s_wait2: final hit judge * s_wait3: receive mem response * s_fragment_superpage: for superpage PTE * * @note l2tlb hit happens in s_req or s_wait1 * @see RV-priv spec 4.3-4.6 for Virtual-Memory System * @see RV-priv spec 8.5 for Two-Stage Address Translation * @todo details in two-stage translation */ class PTW(n: Int)(implicit edge: TLEdgeOut, p: Parameters) extends CoreModule()(p) { val io = IO(new Bundle { /** to n TLB */ val requestor = Flipped(Vec(n, new TLBPTWIO)) /** to HellaCache */ val mem = new HellaCacheIO /** to Core * * contains CSRs info and performance statistics */ val dpath = new DatapathPTWIO }) val s_ready :: s_req :: s_wait1 :: s_dummy1 :: s_wait2 :: s_wait3 :: s_dummy2 :: s_fragment_superpage :: Nil = Enum(8) val state = RegInit(s_ready) val l2_refill_wire = Wire(Bool()) /** Arbiter to arbite request from n TLB */ val arb = Module(new Arbiter(Valid(new PTWReq), n)) // use TLB req as arbitor's input arb.io.in <> io.requestor.map(_.req) // receive req only when s_ready and not in refill arb.io.out.ready := (state === s_ready) && !l2_refill_wire val resp_valid = RegNext(VecInit(Seq.fill(io.requestor.size)(false.B))) val clock_en = state =/= s_ready || l2_refill_wire || arb.io.out.valid || io.dpath.sfence.valid || io.dpath.customCSRs.disableDCacheClockGate io.dpath.clock_enabled := usingVM.B && clock_en val gated_clock = if (!usingVM || !tileParams.dcache.get.clockGate) clock else ClockGate(clock, clock_en, "ptw_clock_gate") withClock (gated_clock) { // entering gated-clock domain val invalidated = Reg(Bool()) /** current PTE level * {{{ * 0 <= count <= pgLevel-1 * count = pgLevel - 1 : leaf PTE * count < pgLevel - 1 : non-leaf PTE * }}} */ val count = Reg(UInt(log2Ceil(pgLevels).W)) val resp_ae_ptw = Reg(Bool()) val resp_ae_final = Reg(Bool()) val resp_pf = Reg(Bool()) val resp_gf = Reg(Bool()) val resp_hr = Reg(Bool()) val resp_hw = Reg(Bool()) val resp_hx = Reg(Bool()) val resp_fragmented_superpage = Reg(Bool()) /** tlb request */ val r_req = Reg(new PTWReq) /** current selected way in arbitor */ val r_req_dest = Reg(Bits()) // to respond to L1TLB : l2_hit // to construct mem.req.addr val r_pte = Reg(new PTE) val r_hgatp = Reg(new PTBR) // 2-stage pageLevel val aux_count = Reg(UInt(log2Ceil(pgLevels).W)) /** pte for 2-stage translation */ val aux_pte = Reg(new PTE) val gpa_pgoff = Reg(UInt(pgIdxBits.W)) // only valid in resp_gf case val stage2 = Reg(Bool()) val stage2_final = Reg(Bool()) val satp = Mux(arb.io.out.bits.bits.vstage1, io.dpath.vsatp, io.dpath.ptbr) val r_hgatp_initial_count = pgLevels.U - minPgLevels.U - r_hgatp.additionalPgLevels /** 2-stage translation both enable */ val do_both_stages = r_req.vstage1 && r_req.stage2 val max_count = count max aux_count val vpn = Mux(r_req.vstage1 && stage2, aux_pte.ppn, r_req.addr) val mem_resp_valid = RegNext(io.mem.resp.valid) val mem_resp_data = RegNext(io.mem.resp.bits.data) io.mem.uncached_resp.map { resp => assert(!(resp.valid && io.mem.resp.valid)) resp.ready := true.B when 
(resp.valid) { mem_resp_valid := true.B mem_resp_data := resp.bits.data } } // construct pte from mem.resp val (pte, invalid_paddr, invalid_gpa) = { val tmp = mem_resp_data.asTypeOf(new PTE()) val res = WireDefault(tmp) res.ppn := Mux(do_both_stages && !stage2, tmp.ppn(vpnBits.min(tmp.ppn.getWidth)-1, 0), tmp.ppn(ppnBits-1, 0)) when (tmp.r || tmp.w || tmp.x) { // for superpage mappings, make sure PPN LSBs are zero for (i <- 0 until pgLevels-1) when (count <= i.U && tmp.ppn((pgLevels-1-i)*pgLevelBits-1, (pgLevels-2-i)*pgLevelBits) =/= 0.U) { res.v := false.B } } (res, Mux(do_both_stages && !stage2, (tmp.ppn >> vpnBits) =/= 0.U, (tmp.ppn >> ppnBits) =/= 0.U), do_both_stages && !stage2 && checkInvalidHypervisorGPA(r_hgatp, tmp.ppn)) } // find non-leaf PTE, need traverse val traverse = pte.table() && !invalid_paddr && !invalid_gpa && count < (pgLevels-1).U /** address send to mem for enquerry */ val pte_addr = if (!usingVM) 0.U else { val vpn_idxs = (0 until pgLevels).map { i => val width = pgLevelBits + (if (i <= pgLevels - minPgLevels) hypervisorExtraAddrBits else 0) (vpn >> (pgLevels - i - 1) * pgLevelBits)(width - 1, 0) } val mask = Mux(stage2 && count === r_hgatp_initial_count, ((1 << (hypervisorExtraAddrBits + pgLevelBits)) - 1).U, ((1 << pgLevelBits) - 1).U) val vpn_idx = vpn_idxs(count) & mask val raw_pte_addr = ((r_pte.ppn << pgLevelBits) | vpn_idx) << log2Ceil(xLen / 8) val size = if (usingHypervisor) vaddrBits else paddrBits //use r_pte.ppn as page table base address //use vpn slice as offset raw_pte_addr.apply(size.min(raw_pte_addr.getWidth) - 1, 0) } /** stage2_pte_cache input addr */ val stage2_pte_cache_addr = if (!usingHypervisor) 0.U else { val vpn_idxs = (0 until pgLevels - 1).map { i => (r_req.addr >> (pgLevels - i - 1) * pgLevelBits)(pgLevelBits - 1, 0) } val vpn_idx = vpn_idxs(aux_count) val raw_s2_pte_cache_addr = Cat(aux_pte.ppn, vpn_idx) << log2Ceil(xLen / 8) raw_s2_pte_cache_addr(vaddrBits.min(raw_s2_pte_cache_addr.getWidth) - 1, 0) } def makeFragmentedSuperpagePPN(ppn: UInt): Seq[UInt] = { (pgLevels-1 until 0 by -1).map(i => Cat(ppn >> (pgLevelBits*i), r_req.addr(((pgLevelBits*i) min vpnBits)-1, 0).padTo(pgLevelBits*i))) } /** PTECache caches non-leaf PTE * @param s2 true: 2-stage address translation */ def makePTECache(s2: Boolean): (Bool, UInt) = if (coreParams.nPTECacheEntries == 0) { (false.B, 0.U) } else { val plru = new PseudoLRU(coreParams.nPTECacheEntries) val valid = RegInit(0.U(coreParams.nPTECacheEntries.W)) val tags = Reg(Vec(coreParams.nPTECacheEntries, UInt((if (usingHypervisor) 1 + vaddrBits else paddrBits).W))) // not include full pte, only ppn val data = Reg(Vec(coreParams.nPTECacheEntries, UInt((if (usingHypervisor && s2) vpnBits else ppnBits).W))) val can_hit = if (s2) count === r_hgatp_initial_count && aux_count < (pgLevels-1).U && r_req.vstage1 && stage2 && !stage2_final else count < (pgLevels-1).U && Mux(r_req.vstage1, stage2, !r_req.stage2) val can_refill = if (s2) do_both_stages && !stage2 && !stage2_final else can_hit val tag = if (s2) Cat(true.B, stage2_pte_cache_addr.padTo(vaddrBits)) else Cat(r_req.vstage1, pte_addr.padTo(if (usingHypervisor) vaddrBits else paddrBits)) val hits = tags.map(_ === tag).asUInt & valid val hit = hits.orR && can_hit // refill with mem response when (mem_resp_valid && traverse && can_refill && !hits.orR && !invalidated) { val r = Mux(valid.andR, plru.way, PriorityEncoder(~valid)) valid := valid | UIntToOH(r) tags(r) := tag data(r) := pte.ppn plru.access(r) } // replace when (hit && state === s_req) { 
plru.access(OHToUInt(hits)) } when (io.dpath.sfence.valid && (!io.dpath.sfence.bits.rs1 || usingHypervisor.B && io.dpath.sfence.bits.hg)) { valid := 0.U } val lcount = if (s2) aux_count else count for (i <- 0 until pgLevels-1) { ccover(hit && state === s_req && lcount === i.U, s"PTE_CACHE_HIT_L$i", s"PTE cache hit, level $i") } (hit, Mux1H(hits, data)) } // generate pte_cache val (pte_cache_hit, pte_cache_data) = makePTECache(false) // generate pte_cache with 2-stage translation val (stage2_pte_cache_hit, stage2_pte_cache_data) = makePTECache(true) // pte_cache hit or 2-stage pte_cache hit val pte_hit = RegNext(false.B) io.dpath.perf.pte_miss := false.B io.dpath.perf.pte_hit := pte_hit && (state === s_req) && !io.dpath.perf.l2hit assert(!(io.dpath.perf.l2hit && (io.dpath.perf.pte_miss || io.dpath.perf.pte_hit)), "PTE Cache Hit/Miss Performance Monitor Events are lower priority than L2TLB Hit event") // l2_refill happens when find the leaf pte val l2_refill = RegNext(false.B) l2_refill_wire := l2_refill io.dpath.perf.l2miss := false.B io.dpath.perf.l2hit := false.B // l2tlb val (l2_hit, l2_error, l2_pte, l2_tlb_ram) = if (coreParams.nL2TLBEntries == 0) (false.B, false.B, WireDefault(0.U.asTypeOf(new PTE)), None) else { val code = new ParityCode require(isPow2(coreParams.nL2TLBEntries)) require(isPow2(coreParams.nL2TLBWays)) require(coreParams.nL2TLBEntries >= coreParams.nL2TLBWays) val nL2TLBSets = coreParams.nL2TLBEntries / coreParams.nL2TLBWays require(isPow2(nL2TLBSets)) val idxBits = log2Ceil(nL2TLBSets) val l2_plru = new SetAssocLRU(nL2TLBSets, coreParams.nL2TLBWays, "plru") val ram = DescribedSRAM( name = "l2_tlb_ram", desc = "L2 TLB", size = nL2TLBSets, data = Vec(coreParams.nL2TLBWays, UInt(code.width(new L2TLBEntry(nL2TLBSets).getWidth).W)) ) val g = Reg(Vec(coreParams.nL2TLBWays, UInt(nL2TLBSets.W))) val valid = RegInit(VecInit(Seq.fill(coreParams.nL2TLBWays)(0.U(nL2TLBSets.W)))) // use r_req to construct tag val (r_tag, r_idx) = Split(Cat(r_req.vstage1, r_req.addr(maxSVAddrBits-pgIdxBits-1, 0)), idxBits) /** the valid vec for the selected set(including n ways) */ val r_valid_vec = valid.map(_(r_idx)).asUInt val r_valid_vec_q = Reg(UInt(coreParams.nL2TLBWays.W)) val r_l2_plru_way = Reg(UInt(log2Ceil(coreParams.nL2TLBWays max 1).W)) r_valid_vec_q := r_valid_vec // replacement way r_l2_plru_way := (if (coreParams.nL2TLBWays > 1) l2_plru.way(r_idx) else 0.U) // refill with r_pte(leaf pte) when (l2_refill && !invalidated) { val entry = Wire(new L2TLBEntry(nL2TLBSets)) entry.ppn := r_pte.ppn entry.d := r_pte.d entry.a := r_pte.a entry.u := r_pte.u entry.x := r_pte.x entry.w := r_pte.w entry.r := r_pte.r entry.tag := r_tag // if all the way are valid, use plru to select one way to be replaced, // otherwise use PriorityEncoderOH to select one val wmask = if (coreParams.nL2TLBWays > 1) Mux(r_valid_vec_q.andR, UIntToOH(r_l2_plru_way, coreParams.nL2TLBWays), PriorityEncoderOH(~r_valid_vec_q)) else 1.U(1.W) ram.write(r_idx, VecInit(Seq.fill(coreParams.nL2TLBWays)(code.encode(entry.asUInt))), wmask.asBools) val mask = UIntToOH(r_idx) for (way <- 0 until coreParams.nL2TLBWays) { when (wmask(way)) { valid(way) := valid(way) | mask g(way) := Mux(r_pte.g, g(way) | mask, g(way) & ~mask) } } } // sfence happens when (io.dpath.sfence.valid) { val hg = usingHypervisor.B && io.dpath.sfence.bits.hg for (way <- 0 until coreParams.nL2TLBWays) { valid(way) := Mux(!hg && io.dpath.sfence.bits.rs1, valid(way) & ~UIntToOH(io.dpath.sfence.bits.addr(idxBits+pgIdxBits-1, pgIdxBits)), Mux(!hg && 
io.dpath.sfence.bits.rs2, valid(way) & g(way), 0.U)) } } val s0_valid = !l2_refill && arb.io.out.fire val s0_suitable = arb.io.out.bits.bits.vstage1 === arb.io.out.bits.bits.stage2 && !arb.io.out.bits.bits.need_gpa val s1_valid = RegNext(s0_valid && s0_suitable && arb.io.out.bits.valid) val s2_valid = RegNext(s1_valid) // read from tlb idx val s1_rdata = ram.read(arb.io.out.bits.bits.addr(idxBits-1, 0), s0_valid) val s2_rdata = s1_rdata.map(s1_rdway => code.decode(RegEnable(s1_rdway, s1_valid))) val s2_valid_vec = RegEnable(r_valid_vec, s1_valid) val s2_g_vec = RegEnable(VecInit(g.map(_(r_idx))), s1_valid) val s2_error = (0 until coreParams.nL2TLBWays).map(way => s2_valid_vec(way) && s2_rdata(way).error).orR when (s2_valid && s2_error) { valid.foreach { _ := 0.U }} // decode val s2_entry_vec = s2_rdata.map(_.uncorrected.asTypeOf(new L2TLBEntry(nL2TLBSets))) val s2_hit_vec = (0 until coreParams.nL2TLBWays).map(way => s2_valid_vec(way) && (r_tag === s2_entry_vec(way).tag)) val s2_hit = s2_valid && s2_hit_vec.orR io.dpath.perf.l2miss := s2_valid && !(s2_hit_vec.orR) io.dpath.perf.l2hit := s2_hit when (s2_hit) { l2_plru.access(r_idx, OHToUInt(s2_hit_vec)) assert((PopCount(s2_hit_vec) === 1.U) || s2_error, "L2 TLB multi-hit") } val s2_pte = Wire(new PTE) val s2_hit_entry = Mux1H(s2_hit_vec, s2_entry_vec) s2_pte.ppn := s2_hit_entry.ppn s2_pte.d := s2_hit_entry.d s2_pte.a := s2_hit_entry.a s2_pte.g := Mux1H(s2_hit_vec, s2_g_vec) s2_pte.u := s2_hit_entry.u s2_pte.x := s2_hit_entry.x s2_pte.w := s2_hit_entry.w s2_pte.r := s2_hit_entry.r s2_pte.v := true.B s2_pte.reserved_for_future := 0.U s2_pte.reserved_for_software := 0.U for (way <- 0 until coreParams.nL2TLBWays) { ccover(s2_hit && s2_hit_vec(way), s"L2_TLB_HIT_WAY$way", s"L2 TLB hit way$way") } (s2_hit, s2_error, s2_pte, Some(ram)) } // if SFENCE occurs during walk, don't refill PTE cache or L2 TLB until next walk invalidated := io.dpath.sfence.valid || (invalidated && state =/= s_ready) // mem request io.mem.keep_clock_enabled := false.B io.mem.req.valid := state === s_req || state === s_dummy1 io.mem.req.bits.phys := true.B io.mem.req.bits.cmd := M_XRD io.mem.req.bits.size := log2Ceil(xLen/8).U io.mem.req.bits.signed := false.B io.mem.req.bits.addr := pte_addr io.mem.req.bits.idx.foreach(_ := pte_addr) io.mem.req.bits.dprv := PRV.S.U // PTW accesses are S-mode by definition io.mem.req.bits.dv := do_both_stages && !stage2 io.mem.req.bits.tag := DontCare io.mem.req.bits.no_resp := false.B io.mem.req.bits.no_alloc := DontCare io.mem.req.bits.no_xcpt := DontCare io.mem.req.bits.data := DontCare io.mem.req.bits.mask := DontCare io.mem.s1_kill := l2_hit || (state =/= s_wait1) || resp_gf io.mem.s1_data := DontCare io.mem.s2_kill := false.B val pageGranularityPMPs = pmpGranularity >= (1 << pgIdxBits) require(!usingHypervisor || pageGranularityPMPs, s"hypervisor requires pmpGranularity >= ${1<<pgIdxBits}") val pmaPgLevelHomogeneous = (0 until pgLevels) map { i => val pgSize = BigInt(1) << (pgIdxBits + ((pgLevels - 1 - i) * pgLevelBits)) if (pageGranularityPMPs && i == pgLevels - 1) { require(TLBPageLookup.homogeneous(edge.manager.managers, pgSize), s"All memory regions must be $pgSize-byte aligned") true.B } else { TLBPageLookup(edge.manager.managers, xLen, p(CacheBlockBytes), pgSize, xLen/8)(r_pte.ppn << pgIdxBits).homogeneous } } val pmaHomogeneous = pmaPgLevelHomogeneous(count) val pmpHomogeneous = new PMPHomogeneityChecker(io.dpath.pmp).apply(r_pte.ppn << pgIdxBits, count) val homogeneous = pmaHomogeneous && pmpHomogeneous // response to tlb for 
(i <- 0 until io.requestor.size) { io.requestor(i).resp.valid := resp_valid(i) io.requestor(i).resp.bits.ae_ptw := resp_ae_ptw io.requestor(i).resp.bits.ae_final := resp_ae_final io.requestor(i).resp.bits.pf := resp_pf io.requestor(i).resp.bits.gf := resp_gf io.requestor(i).resp.bits.hr := resp_hr io.requestor(i).resp.bits.hw := resp_hw io.requestor(i).resp.bits.hx := resp_hx io.requestor(i).resp.bits.pte := r_pte io.requestor(i).resp.bits.level := max_count io.requestor(i).resp.bits.homogeneous := homogeneous || pageGranularityPMPs.B io.requestor(i).resp.bits.fragmented_superpage := resp_fragmented_superpage && pageGranularityPMPs.B io.requestor(i).resp.bits.gpa.valid := r_req.need_gpa io.requestor(i).resp.bits.gpa.bits := Cat(Mux(!stage2_final || !r_req.vstage1 || aux_count === (pgLevels - 1).U, aux_pte.ppn, makeFragmentedSuperpagePPN(aux_pte.ppn)(aux_count)), gpa_pgoff) io.requestor(i).resp.bits.gpa_is_pte := !stage2_final io.requestor(i).ptbr := io.dpath.ptbr io.requestor(i).hgatp := io.dpath.hgatp io.requestor(i).vsatp := io.dpath.vsatp io.requestor(i).customCSRs <> io.dpath.customCSRs io.requestor(i).status := io.dpath.status io.requestor(i).hstatus := io.dpath.hstatus io.requestor(i).gstatus := io.dpath.gstatus io.requestor(i).pmp := io.dpath.pmp } // control state machine val next_state = WireDefault(state) state := OptimizationBarrier(next_state) val do_switch = WireDefault(false.B) switch (state) { is (s_ready) { when (arb.io.out.fire) { val satp_initial_count = pgLevels.U - minPgLevels.U - satp.additionalPgLevels val vsatp_initial_count = pgLevels.U - minPgLevels.U - io.dpath.vsatp.additionalPgLevels val hgatp_initial_count = pgLevels.U - minPgLevels.U - io.dpath.hgatp.additionalPgLevels val aux_ppn = Mux(arb.io.out.bits.bits.vstage1, io.dpath.vsatp.ppn, arb.io.out.bits.bits.addr) r_req := arb.io.out.bits.bits r_req_dest := arb.io.chosen next_state := Mux(arb.io.out.bits.valid, s_req, s_ready) stage2 := arb.io.out.bits.bits.stage2 stage2_final := arb.io.out.bits.bits.stage2 && !arb.io.out.bits.bits.vstage1 count := Mux(arb.io.out.bits.bits.stage2, hgatp_initial_count, satp_initial_count) aux_count := Mux(arb.io.out.bits.bits.vstage1, vsatp_initial_count, 0.U) aux_pte.ppn := aux_ppn aux_pte.reserved_for_future := 0.U resp_ae_ptw := false.B resp_ae_final := false.B resp_pf := false.B resp_gf := checkInvalidHypervisorGPA(io.dpath.hgatp, aux_ppn) && arb.io.out.bits.bits.stage2 resp_hr := true.B resp_hw := true.B resp_hx := true.B resp_fragmented_superpage := false.B r_hgatp := io.dpath.hgatp assert(!arb.io.out.bits.bits.need_gpa || arb.io.out.bits.bits.stage2) } } is (s_req) { when(stage2 && count === r_hgatp_initial_count) { gpa_pgoff := Mux(aux_count === (pgLevels-1).U, r_req.addr << (xLen/8).log2, stage2_pte_cache_addr) } // pte_cache hit when (stage2_pte_cache_hit) { aux_count := aux_count + 1.U aux_pte.ppn := stage2_pte_cache_data aux_pte.reserved_for_future := 0.U pte_hit := true.B }.elsewhen (pte_cache_hit) { count := count + 1.U pte_hit := true.B }.otherwise { next_state := Mux(io.mem.req.ready, s_wait1, s_req) } when(resp_gf) { next_state := s_ready resp_valid(r_req_dest) := true.B } } is (s_wait1) { // This Mux is for the l2_error case; the l2_hit && !l2_error case is overriden below next_state := Mux(l2_hit, s_req, s_wait2) } is (s_wait2) { next_state := s_wait3 io.dpath.perf.pte_miss := count < (pgLevels-1).U when (io.mem.s2_xcpt.ae.ld) { resp_ae_ptw := true.B next_state := s_ready resp_valid(r_req_dest) := true.B } } is (s_fragment_superpage) { next_state := s_ready 
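        // s_fragment_superpage is reached after a superpage leaf is found; if the region is not
        // PMA/PMP-homogeneous (or a two-stage walk is involved), the response below reports a
        // fragmented, base-page-sized translation so the TLB does not over-cache the superpage.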
resp_valid(r_req_dest) := true.B when (!homogeneous) { count := (pgLevels-1).U resp_fragmented_superpage := true.B } when (do_both_stages) { resp_fragmented_superpage := true.B } } } val merged_pte = { val superpage_masks = (0 until pgLevels).map(i => ((BigInt(1) << pte.ppn.getWidth) - (BigInt(1) << (pgLevels-1-i)*pgLevelBits)).U) val superpage_mask = superpage_masks(Mux(stage2_final, max_count, (pgLevels-1).U)) val stage1_ppns = (0 until pgLevels-1).map(i => Cat(pte.ppn(pte.ppn.getWidth-1, (pgLevels-i-1)*pgLevelBits), aux_pte.ppn((pgLevels-i-1)*pgLevelBits-1,0))) :+ pte.ppn val stage1_ppn = stage1_ppns(count) makePTE(stage1_ppn & superpage_mask, aux_pte) } r_pte := OptimizationBarrier( // l2tlb hit->find a leaf PTE(l2_pte), respond to L1TLB Mux(l2_hit && !l2_error && !resp_gf, l2_pte, // S2 PTE cache hit -> proceed to the next level of walking, update the r_pte with hgatp Mux(state === s_req && stage2_pte_cache_hit, makeHypervisorRootPTE(r_hgatp, stage2_pte_cache_data, l2_pte), // pte cache hit->find a non-leaf PTE(pte_cache),continue to request mem Mux(state === s_req && pte_cache_hit, makePTE(pte_cache_data, l2_pte), // 2-stage translation Mux(do_switch, makeHypervisorRootPTE(r_hgatp, pte.ppn, r_pte), // when mem respond, store mem.resp.pte Mux(mem_resp_valid, Mux(!traverse && r_req.vstage1 && stage2, merged_pte, pte), // fragment_superpage Mux(state === s_fragment_superpage && !homogeneous && count =/= (pgLevels - 1).U, makePTE(makeFragmentedSuperpagePPN(r_pte.ppn)(count), r_pte), // when tlb request come->request mem, use root address in satp(or vsatp,hgatp) Mux(arb.io.out.fire, Mux(arb.io.out.bits.bits.stage2, makeHypervisorRootPTE(io.dpath.hgatp, io.dpath.vsatp.ppn, r_pte), makePTE(satp.ppn, r_pte)), r_pte)))))))) when (l2_hit && !l2_error && !resp_gf) { assert(state === s_req || state === s_wait1) next_state := s_ready resp_valid(r_req_dest) := true.B count := (pgLevels-1).U } when (mem_resp_valid) { assert(state === s_wait3) next_state := s_req when (traverse) { when (do_both_stages && !stage2) { do_switch := true.B } count := count + 1.U }.otherwise { val gf = (stage2 && !stage2_final && !pte.ur()) || (pte.leaf() && pte.reserved_for_future === 0.U && invalid_gpa) val ae = pte.v && invalid_paddr val pf = pte.v && pte.reserved_for_future =/= 0.U val success = pte.v && !ae && !pf && !gf when (do_both_stages && !stage2_final && success) { when (stage2) { stage2 := false.B count := aux_count }.otherwise { stage2_final := true.B do_switch := true.B } }.otherwise { // find a leaf pte, start l2 refill l2_refill := success && count === (pgLevels-1).U && !r_req.need_gpa && (!r_req.vstage1 && !r_req.stage2 || do_both_stages && aux_count === (pgLevels-1).U && pte.isFullPerm()) count := max_count when (pageGranularityPMPs.B && !(count === (pgLevels-1).U && (!do_both_stages || aux_count === (pgLevels-1).U))) { next_state := s_fragment_superpage }.otherwise { next_state := s_ready resp_valid(r_req_dest) := true.B } resp_ae_ptw := ae && count < (pgLevels-1).U && pte.table() resp_ae_final := ae && pte.leaf() resp_pf := pf && !stage2 resp_gf := gf || (pf && stage2) resp_hr := !stage2 || (!pf && !gf && pte.ur()) resp_hw := !stage2 || (!pf && !gf && pte.uw()) resp_hx := !stage2 || (!pf && !gf && pte.ux()) } } } when (io.mem.s2_nack) { assert(state === s_wait2) next_state := s_req } when (do_switch) { aux_count := Mux(traverse, count + 1.U, count) count := r_hgatp_initial_count aux_pte := Mux(traverse, pte, { val s1_ppns = (0 until pgLevels-1).map(i => Cat(pte.ppn(pte.ppn.getWidth-1, 
      (pgLevels-i-1)*pgLevelBits), r_req.addr(((pgLevels-i-1)*pgLevelBits min vpnBits)-1,0).padTo((pgLevels-i-1)*pgLevelBits))) :+ pte.ppn
      makePTE(s1_ppns(count), pte)
    })
    stage2 := true.B
  }

  for (i <- 0 until pgLevels) {
    val leaf = mem_resp_valid && !traverse && count === i.U
    ccover(leaf && pte.v && !invalid_paddr && !invalid_gpa && pte.reserved_for_future === 0.U, s"L$i", s"successful page-table access, level $i")
    ccover(leaf && pte.v && invalid_paddr, s"L${i}_BAD_PPN_MSB", s"PPN too large, level $i")
    ccover(leaf && pte.v && invalid_gpa, s"L${i}_BAD_GPA_MSB", s"GPA too large, level $i")
    ccover(leaf && pte.v && pte.reserved_for_future =/= 0.U, s"L${i}_BAD_RSV_MSB", s"reserved MSBs set, level $i")
    ccover(leaf && !mem_resp_data(0), s"L${i}_INVALID_PTE", s"page not present, level $i")
    if (i != pgLevels-1)
      ccover(leaf && !pte.v && mem_resp_data(0), s"L${i}_BAD_PPN_LSB", s"PPN LSBs not zero, level $i")
  }
  ccover(mem_resp_valid && count === (pgLevels-1).U && pte.table(), s"TOO_DEEP", s"page table too deep")
  ccover(io.mem.s2_nack, "NACK", "D$ nacked page-table access")
  ccover(state === s_wait2 && io.mem.s2_xcpt.ae.ld, "AE", "access exception while walking page table")
  } // leaving gated-clock domain

  private def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
    if (usingVM) property.cover(cond, s"PTW_$label", "MemorySystem;;" + desc)

  /** Replace PTE.ppn with ppn */
  private def makePTE(ppn: UInt, default: PTE) = {
    val pte = WireDefault(default)
    pte.ppn := ppn
    pte
  }

  /** use hgatp and vpn to construct a new ppn */
  private def makeHypervisorRootPTE(hgatp: PTBR, vpn: UInt, default: PTE) = {
    val count = pgLevels.U - minPgLevels.U - hgatp.additionalPgLevels
    val idxs = (0 to pgLevels-minPgLevels).map(i => (vpn >> (pgLevels-i)*pgLevelBits))
    val lsbs = WireDefault(UInt(maxHypervisorExtraAddrBits.W), idxs(count))
    val pte = WireDefault(default)
    pte.ppn := Cat(hgatp.ppn >> maxHypervisorExtraAddrBits, lsbs)
    pte
  }

  /** use hgatp and vpn to check for gpa out of range */
  private def checkInvalidHypervisorGPA(hgatp: PTBR, vpn: UInt) = {
    val count = pgLevels.U - minPgLevels.U - hgatp.additionalPgLevels
    val idxs = (0 to pgLevels-minPgLevels).map(i => (vpn >> ((pgLevels-i)*pgLevelBits)+maxHypervisorExtraAddrBits))
    idxs.extract(count) =/= 0.U
  }
}

/** Mix-ins for constructing tiles that might have a PTW */
trait CanHavePTW extends HasTileParameters with HasHellaCache { this: BaseTile =>
  val module: CanHavePTWModule
  var nPTWPorts = 1
  nDCachePorts += usingPTW.toInt
}

trait CanHavePTWModule extends HasHellaCacheModule {
  val outer: CanHavePTW
  val ptwPorts = ListBuffer(outer.dcache.module.io.ptw)
  val ptw = Module(new PTW(outer.nPTWPorts)(outer.dcache.node.edges.out(0), outer.p))
  ptw.io.mem <> DontCare
  if (outer.usingPTW) {
    dcachePorts += ptw.io.mem
  }
}

File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes

import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo

import org.chipsalliance.cde.config.{Field, Parameters}

import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine

/** One side metadata of a [[Dangle]].
  *
  * Describes one side of an edge going into or out of a [[BaseNode]].
  *
  * @param serial
  *   the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
  * @param index
  *   the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
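  *
  * For example, `HalfEdge(serial = 7, index = 0)` (illustrative values) names port 0 of the
  * [[BaseNode]] whose global serial number is 7; a [[Dangle]] pairs two such half-edges to record a
  * single end-to-end connection.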
*/ case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] { import scala.math.Ordered.orderingToOrdered def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that)) } /** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]] * connects. * * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] , * [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]]. * * @param source * the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within * that [[BaseNode]]. * @param sink * sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that * [[BaseNode]]. * @param flipped * flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to * `danglesIn`. * @param dataOpt * actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module */ case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) { def data = dataOpt.get } /** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often * derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually * implement the protocol. */ case class Edges[EI, EO](in: Seq[EI], out: Seq[EO]) /** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */ case object MonitorsEnabled extends Field[Boolean](true) /** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented. * * For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but * [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink * nodes, flipping the rendering of one node's edge will usual produce a more concise visual layout for the * [[LazyModule]]. */ case object RenderFlipped extends Field[Boolean](false) /** The sealed node class in the package, all node are derived from it. * * @param inner * Sink interface implementation. * @param outer * Source interface implementation. * @param valName * val name of this node. * @tparam DI * Downward-flowing parameters received on the inner side of the node. It is usually a brunch of parameters * describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected * [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port * parameters. * @tparam UI * Upward-flowing parameters generated by the inner side of the node. It is usually a brunch of parameters describing * the protocol parameters of a sink. For an [[InwardNode]], it is determined itself. * @tparam EI * Edge Parameters describing a connection on the inner side of the node. It is usually a brunch of transfers * specified for a sink according to protocol. * @tparam BI * Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface. * It should extends from [[chisel3.Data]], which represents the real hardware. * @tparam DO * Downward-flowing parameters generated on the outer side of the node. 
It is usually a brunch of parameters * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined itself. * @tparam UO * Upward-flowing parameters received by the outer side of the node. It is usually a brunch of parameters describing * the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]]. * Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters. * @tparam EO * Edge Parameters describing a connection on the outer side of the node. It is usually a brunch of transfers * specified for a source according to protocol. * @tparam BO * Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source * interface. It should extends from [[chisel3.Data]], which represents the real hardware. * * @note * Call Graph of [[MixedNode]] * - line `─`: source is process by a function and generate pass to others * - Arrow `→`: target of arrow is generated by source * * {{{ * (from the other node) * ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐ * ↓ │ * (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │ * [[InwardNode.accPI]] │ │ │ * │ │ (based on protocol) │ * │ │ [[MixedNode.inner.edgeI]] │ * │ │ ↓ │ * ↓ │ │ │ * (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │ * [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │ * │ │ ↑ │ │ │ * │ │ │ [[OutwardNode.doParams]] │ │ * │ │ │ (from the other node) │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * │ │ │ └────────┬──────────────┤ │ * │ │ │ │ │ │ * │ │ │ │ (based on protocol) │ * │ │ │ │ [[MixedNode.inner.edgeI]] │ * │ │ │ │ │ │ * │ │ (from the other node) │ ↓ │ * │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │ * │ ↑ ↑ │ │ ↓ │ * │ │ │ │ │ [[MixedNode.in]] │ * │ │ │ │ ↓ ↑ │ * │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │ * ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │ * │ │ │ [[MixedNode.bundleOut]]─┐ │ │ * │ │ │ ↑ ↓ │ │ * │ │ │ │ [[MixedNode.out]] │ │ * │ ↓ ↓ │ ↑ │ │ * │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │ * │ │ (from the other node) ↑ │ │ * │ │ │ │ │ │ * │ │ │ [[MixedNode.outer.edgeO]] │ │ * │ │ │ (based on protocol) │ │ * │ │ │ │ │ │ * │ │ │ ┌────────────────────────────────────────┤ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * (immobilize after elaboration)│ ↓ │ │ │ │ * [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │ * ↑ (inward port from [[OutwardNode]]) │ │ │ │ * │ ┌─────────────────────────────────────────┤ │ │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * [[OutwardNode.accPO]] │ ↓ │ │ │ * (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │ * │ ↑ │ │ * │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │ * └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘ * }}} */ abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data]( val inner: InwardNodeImp[DI, UI, EI, BI], val outer: OutwardNodeImp[DO, UO, EO, BO] )( implicit valName: ValName) extends BaseNode with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO] with 
InwardNode[DI, UI, BI] with OutwardNode[DO, UO, BO] { // Generate a [[NodeHandle]] with inward and outward node are both this node. val inward = this val outward = this /** Debug info of nodes binding. */ def bindingInfo: String = s"""$iBindingInfo |$oBindingInfo |""".stripMargin /** Debug info of ports connecting. */ def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}] |${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}] |""".stripMargin /** Debug info of parameters propagations. */ def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}] |${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}] |${diParams.size} downstream inward parameters: [${diParams.mkString(",")}] |${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}] |""".stripMargin /** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and * [[MixedNode.iPortMapping]]. * * Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward * stars and outward stars. * * This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type * of node. * * @param iKnown * Number of known-size ([[BIND_ONCE]]) input bindings. * @param oKnown * Number of known-size ([[BIND_ONCE]]) output bindings. * @param iStar * Number of unknown size ([[BIND_STAR]]) input bindings. * @param oStar * Number of unknown size ([[BIND_STAR]]) output bindings. * @return * A Tuple of the resolved number of input and output connections. */ protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) /** Function to generate downward-flowing outward params from the downward-flowing input params and the current output * ports. * * @param n * The size of the output sequence to generate. * @param p * Sequence of downward-flowing input parameters of this node. * @return * A `n`-sized sequence of downward-flowing output edge parameters. */ protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]]. * * @param n * Size of the output sequence. * @param p * Upward-flowing output edge parameters. * @return * A n-sized sequence of upward-flowing input edge parameters. */ protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] /** @return * The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with * [[BIND_STAR]]. */ protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR) /** @return * The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of * output bindings bound with [[BIND_STAR]]. */ protected[diplomacy] lazy val sourceCard: Int = iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR) /** @return list of nodes involved in flex bindings with this node. */ protected[diplomacy] lazy val flexes: Seq[BaseNode] = oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2) /** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin * greedily taking up the remaining connections. 
* * @return * A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return * value is not relevant. */ protected[diplomacy] lazy val flexOffset: Int = { /** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex * operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a * connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of * each node in the current set and decide whether they should be added to the set or not. * * @return * the mapping of [[BaseNode]] indexed by their serial numbers. */ def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = { if (visited.contains(v.serial) || !v.flexibleArityDirection) { visited } else { v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum)) } } /** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node. * * @example * {{{ * a :*=* b :*=* c * d :*=* b * e :*=* f * }}} * * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)` */ val flexSet = DFS(this, Map()).values /** The total number of :*= operators where we're on the left. */ val allSink = flexSet.map(_.sinkCard).sum /** The total number of :=* operators used when we're on the right. */ val allSource = flexSet.map(_.sourceCard).sum require( allSink == 0 || allSource == 0, s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction." ) allSink - allSource } /** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */ protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = { if (flexibleArityDirection) flexOffset else if (n.flexibleArityDirection) n.flexOffset else 0 } /** For a node which is connected between two nodes, select the one that will influence the direction of the flex * resolution. */ protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = { val dir = edgeArityDirection(n) if (dir < 0) l else if (dir > 0) r else 1 } /** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */ private var starCycleGuard = false /** Resolve all the star operators into concrete indicies. As connections are being made, some may be "star" * connections which need to be resolved. In some way to determine how many actual edges they correspond to. We also * need to build up the ranges of edges which correspond to each binding operator, so that We can apply the correct * edge parameters and later build up correct bundle connections. * * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding * operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort * (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= * bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N` */ protected[diplomacy] lazy val ( oPortMapping: Seq[(Int, Int)], iPortMapping: Seq[(Int, Int)], oStar: Int, iStar: Int ) = { try { if (starCycleGuard) throw StarCycleException() starCycleGuard = true // For a given node N... 
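      // The counts below separate bindings whose edge count is already fixed (BIND_ONCE, or known
      // from the neighbour via BIND_QUERY/BIND_FLEX) from the star bindings still to be inferred.
      // For illustration, a hypothetical node N bound as `foo := N` and `bar :=* N` has oKnown = 1
      // and oStars = 1; resolveStar, supplied by the concrete node subclass, then decides how many
      // edges that remaining star stands for.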
// Number of foo :=* N // + Number of bar :=* foo :*=* N val oStars = oBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0) } // Number of N :*= foo // + Number of N :*=* foo :*= bar val iStars = iBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0) } // 1 for foo := N // + bar.iStar for bar :*= foo :*=* N // + foo.iStar for foo :*= N // + 0 for foo :=* N val oKnown = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, 0, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => 0 } }.sum // 1 for N := foo // + bar.oStar for N :*=* foo :=* bar // + foo.oStar for N :=* foo // + 0 for N :*= foo val iKnown = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, 0) case BIND_QUERY => n.oStar case BIND_STAR => 0 } }.sum // Resolve star depends on the node subclass to implement the algorithm for this. val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars) // Cumulative list of resolved outward binding range starting points val oSum = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => oStar } }.scanLeft(0)(_ + _) // Cumulative list of resolved inward binding range starting points val iSum = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar) case BIND_QUERY => n.oStar case BIND_STAR => iStar } }.scanLeft(0)(_ + _) // Create ranges for each binding based on the running sums and return // those along with resolved values for the star operations. (oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar) } catch { case c: StarCycleException => throw c.copy(loop = context +: c.loop) } } /** Sequence of inward ports. * * This should be called after all star bindings are resolved. * * Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. * `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this * connection was made in the source code. */ protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oBindings.flatMap { case (i, n, _, p, s) => // for each binding operator in this node, look at what it connects to val (start, end) = n.iPortMapping(i) (start until end).map { j => (j, n, p, s) } } /** Sequence of outward ports. * * This should be called after all star bindings are resolved. * * `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of * outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection * was made in the source code. */ protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iBindings.flatMap { case (i, n, _, p, s) => // query this port index range of this node in the other side of node. 
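      // Each inward binding may have resolved to a whole range of edges on the neighbouring
      // OutwardNode (after star expansion), so its single iBindings entry is expanded here into one
      // port tuple per edge in that range.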
val (start, end) = n.oPortMapping(i) (start until end).map { j => (j, n, p, s) } } // Ephemeral nodes ( which have non-None iForward/oForward) have in_degree = out_degree // Thus, there must exist an Eulerian path and the below algorithms terminate @scala.annotation.tailrec private def oTrace( tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) ): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.iForward(i) match { case None => (i, n, p, s) case Some((j, m)) => oTrace((j, m, p, s)) } } @scala.annotation.tailrec private def iTrace( tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) ): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.oForward(i) match { case None => (i, n, p, s) case Some((j, m)) => iTrace((j, m, p, s)) } } /** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - Numeric index of this binding in the [[InwardNode]] on the other end. * - [[InwardNode]] on the other end of this binding. * - A view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace) /** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - numeric index of this binding in [[OutwardNode]] on the other end. * - [[OutwardNode]] on the other end of this binding. * - a view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace) private var oParamsCycleGuard = false protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) } protected[diplomacy] lazy val doParams: Seq[DO] = { try { if (oParamsCycleGuard) throw DownwardCycleException() oParamsCycleGuard = true val o = mapParamsD(oPorts.size, diParams) require( o.size == oPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of outward ports should equal the number of produced outward parameters. |$context |$connectedPortsInfo |Downstreamed inward parameters: [${diParams.mkString(",")}] |Produced outward parameters: [${o.mkString(",")}] |""".stripMargin ) o.map(outer.mixO(_, this)) } catch { case c: DownwardCycleException => throw c.copy(loop = context +: c.loop) } } private var iParamsCycleGuard = false protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) } protected[diplomacy] lazy val uiParams: Seq[UI] = { try { if (iParamsCycleGuard) throw UpwardCycleException() iParamsCycleGuard = true val i = mapParamsU(iPorts.size, uoParams) require( i.size == iPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of inward ports should equal the number of produced inward parameters. |$context |$connectedPortsInfo |Upstreamed outward parameters: [${uoParams.mkString(",")}] |Produced inward parameters: [${i.mkString(",")}] |""".stripMargin ) i.map(inner.mixI(_, this)) } catch { case c: UpwardCycleException => throw c.copy(loop = context +: c.loop) } } /** Outward edge parameters. 
*/ protected[diplomacy] lazy val edgesOut: Seq[EO] = (oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) } /** Inward edge parameters. */ protected[diplomacy] lazy val edgesIn: Seq[EI] = (iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) } /** A tuple of the input edge parameters and output edge parameters for the edges bound to this node. * * If you need to access to the edges of a foreign Node, use this method (in/out create bundles). */ lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut) /** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */ protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e => val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } /** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */ protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e => val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(serial, i), sink = HalfEdge(n.serial, j), flipped = false, name = wirePrefix + "out", dataOpt = None ) } private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(n.serial, j), sink = HalfEdge(serial, i), flipped = true, name = wirePrefix + "in", dataOpt = None ) } /** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */ protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleOut(i))) } /** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */ protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleIn(i))) } private[diplomacy] var instantiated = false /** Gather Bundle and edge parameters of outward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def out: Seq[(BO, EO)] = { require( instantiated, s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleOut.zip(edgesOut) } /** Gather Bundle and edge parameters of inward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def in: Seq[(BI, EI)] = { require( instantiated, s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleIn.zip(edgesIn) } /** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires, * instantiate monitors on all input ports if appropriate, and return all the dangles of this node. 
    */
  protected[diplomacy] def instantiate(): Seq[Dangle] = {
    instantiated = true
    if (!circuitIdentity) {
      (iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
    }
    danglesOut ++ danglesIn
  }

  protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn

  /** Connects the outward part of a node with the inward part of this node. */
  protected[diplomacy] def bind(
    h:       OutwardNode[DI, UI, BI],
    binding: NodeBinding
  )(
    implicit p: Parameters,
    sourceInfo: SourceInfo
  ): Unit = {
    val x = this // x := y
    val y = h
    sourceLine(sourceInfo, " at ", "")
    val i = x.iPushed
    val o = y.oPushed
    y.oPush(
      i,
      x,
      binding match {
        case BIND_ONCE  => BIND_ONCE
        case BIND_FLEX  => BIND_FLEX
        case BIND_STAR  => BIND_QUERY
        case BIND_QUERY => BIND_STAR
      }
    )
    x.iPush(o, y, binding)
  }

  /* Metadata for printing the node graph. */
  def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
    val re = inner.render(e)
    (n, re.copy(flipped = re.flipped != p(RenderFlipped)))
  }

  /** Metadata for printing the node graph */
  def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}

File HierarchicalElement.scala:
package freechips.rocketchip.subsystem

import chisel3._
import chisel3.util._

import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._

import freechips.rocketchip.devices.debug.TLDebugModule
import freechips.rocketchip.diplomacy.{BufferParams}
import freechips.rocketchip.interrupts.IntXbar
import freechips.rocketchip.prci.{ClockSinkParameters, ResetCrossingType, ClockCrossingType}
import freechips.rocketchip.tile.{LookupByHartIdImpl, TraceBundle}
import freechips.rocketchip.tilelink.{TLNode, TLIdentityNode, TLXbar, TLBuffer, TLInwardNode, TLOutwardNode}

trait HierarchicalElementParams {
  val baseName: String // duplicated instances should share a base name
  val uniqueName: String
  val clockSinkParams: ClockSinkParameters
}

abstract class InstantiableHierarchicalElementParams[ElementType <: BaseHierarchicalElement]
  extends HierarchicalElementParams

/** An interface for describing the parameterization of how HierarchicalElements are connected to interconnects */
trait HierarchicalElementCrossingParamsLike {
  /** The type of clock crossing that should be inserted at the element boundary. */
  def crossingType: ClockCrossingType
  /** Parameters describing the contents and behavior of the point where the element is attached as an interconnect master. */
  def master: HierarchicalElementPortParamsLike
  /** Parameters describing the contents and behavior of the point where the element is attached as an interconnect slave. */
  def slave: HierarchicalElementPortParamsLike
  /** The subnetwork location of the device selecting the apparent base address of MMIO devices inside the element */
  def mmioBaseAddressPrefixWhere: TLBusWrapperLocation
  /** Inject a reset management subgraph that affects the element child reset only */
  def resetCrossingType: ResetCrossingType
  /** Keep the element clock separate from the interconnect clock (e.g. even if they are synchronous to one another) */
  def forceSeparateClockReset: Boolean
}

/** An interface for describing the parameterization of how a particular element port is connected to an interconnect */
trait HierarchicalElementPortParamsLike {
  /** The subnetwork location of the interconnect to which this element port should be connected.
*/ def where: TLBusWrapperLocation /** Allows port-specific adapters to be injected into the interconnect side of the attachment point. */ def injectNode(context: Attachable)(implicit p: Parameters): TLNode } abstract class BaseHierarchicalElement (val crossing: ClockCrossingType)(implicit p: Parameters) extends LazyModule()(p) with CrossesToOnlyOneClockDomain { def module: BaseHierarchicalElementModuleImp[BaseHierarchicalElement] protected val tlOtherMastersNode = TLIdentityNode() protected val tlMasterXbar = LazyModule(new TLXbar(nameSuffix = Some(s"MasterXbar_$desiredName"))) protected val tlSlaveXbar = LazyModule(new TLXbar(nameSuffix = Some(s"SlaveXbar_$desiredName"))) protected val intXbar = LazyModule(new IntXbar) def masterNode: TLOutwardNode def slaveNode: TLInwardNode /** Helper function to insert additional buffers on master ports at the boundary of the tile. * * The boundary buffering needed to cut feed-through paths is * microarchitecture specific, so this may need to be overridden * in subclasses of this class. */ def makeMasterBoundaryBuffers(crossing: ClockCrossingType)(implicit p: Parameters) = TLBuffer(BufferParams.none) /** Helper function to insert additional buffers on slave ports at the boundary of the tile. * * The boundary buffering needed to cut feed-through paths is * microarchitecture specific, so this may need to be overridden * in subclasses of this class. */ def makeSlaveBoundaryBuffers(crossing: ClockCrossingType)(implicit p: Parameters) = TLBuffer(BufferParams.none) } abstract class BaseHierarchicalElementModuleImp[+L <: BaseHierarchicalElement](val outer: L) extends LazyModuleImp(outer) File RocketTile.scala: // See LICENSE.SiFive for license details. // See LICENSE.Berkeley for license details. package freechips.rocketchip.tile import chisel3._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.devices.tilelink.{BasicBusBlockerParams, BasicBusBlocker} import freechips.rocketchip.diplomacy.{ AddressSet, DisableMonitors, BufferParams } import freechips.rocketchip.resources.{ SimpleDevice, Description, ResourceAnchors, ResourceBindings, ResourceBinding, Resource, ResourceAddress, } import freechips.rocketchip.interrupts.IntIdentityNode import freechips.rocketchip.tilelink.{TLIdentityNode, TLBuffer} import freechips.rocketchip.rocket.{ RocketCoreParams, ICacheParams, DCacheParams, BTBParams, HasHellaCache, HasICacheFrontend, ScratchpadSlavePort, HasICacheFrontendModule, Rocket } import freechips.rocketchip.subsystem.HierarchicalElementCrossingParamsLike import freechips.rocketchip.prci.{ClockSinkParameters, RationalCrossing, ClockCrossingType} import freechips.rocketchip.util.{Annotated, InOrderArbiter} import freechips.rocketchip.util.BooleanToAugmentedBoolean case class RocketTileBoundaryBufferParams(force: Boolean = false) case class RocketTileParams( core: RocketCoreParams = RocketCoreParams(), icache: Option[ICacheParams] = Some(ICacheParams()), dcache: Option[DCacheParams] = Some(DCacheParams()), btb: Option[BTBParams] = Some(BTBParams()), dataScratchpadBytes: Int = 0, tileId: Int = 0, beuAddr: Option[BigInt] = None, blockerCtrlAddr: Option[BigInt] = None, clockSinkParams: ClockSinkParameters = ClockSinkParameters(), boundaryBuffers: Option[RocketTileBoundaryBufferParams] = None ) extends InstantiableTileParams[RocketTile] { require(icache.isDefined) require(dcache.isDefined) val baseName = "rockettile" val uniqueName = s"${baseName}_$tileId" def instantiate(crossing: 
HierarchicalElementCrossingParamsLike, lookup: LookupByHartIdImpl)(implicit p: Parameters): RocketTile = { new RocketTile(this, crossing, lookup) } } class RocketTile private( val rocketParams: RocketTileParams, crossing: ClockCrossingType, lookup: LookupByHartIdImpl, q: Parameters) extends BaseTile(rocketParams, crossing, lookup, q) with SinksExternalInterrupts with SourcesExternalNotifications with HasLazyRoCC // implies CanHaveSharedFPU with CanHavePTW with HasHellaCache with HasHellaCache with HasICacheFrontend { // Private constructor ensures altered LazyModule.p is used implicitly def this(params: RocketTileParams, crossing: HierarchicalElementCrossingParamsLike, lookup: LookupByHartIdImpl)(implicit p: Parameters) = this(params, crossing.crossingType, lookup, p) val intOutwardNode = rocketParams.beuAddr map { _ => IntIdentityNode() } val slaveNode = TLIdentityNode() val masterNode = visibilityNode val dtim_adapter = tileParams.dcache.flatMap { d => d.scratch.map { s => LazyModule(new ScratchpadSlavePort(AddressSet.misaligned(s, d.dataScratchpadBytes), lazyCoreParamsView.coreDataBytes, tileParams.core.useAtomics && !tileParams.core.useAtomicsOnlyForIO)) }} dtim_adapter.foreach(lm => connectTLSlave(lm.node, lm.node.portParams.head.beatBytes)) val bus_error_unit = rocketParams.beuAddr map { a => val beu = LazyModule(new BusErrorUnit(new L1BusErrors, BusErrorUnitParams(a), xLen/8)) intOutwardNode.get := beu.intNode connectTLSlave(beu.node, xBytes) beu } val tile_master_blocker = tileParams.blockerCtrlAddr .map(BasicBusBlockerParams(_, xBytes, masterPortBeatBytes, deadlock = true)) .map(bp => LazyModule(new BasicBusBlocker(bp))) tile_master_blocker.foreach(lm => connectTLSlave(lm.controlNode, xBytes)) // TODO: this doesn't block other masters, e.g. 
RoCCs tlOtherMastersNode := tile_master_blocker.map { _.node := tlMasterXbar.node } getOrElse { tlMasterXbar.node } masterNode :=* tlOtherMastersNode DisableMonitors { implicit p => tlSlaveXbar.node :*= slaveNode } nDCachePorts += 1 /*core */ + (dtim_adapter.isDefined).toInt + rocketParams.core.vector.map(_.useDCache.toInt).getOrElse(0) val dtimProperty = dtim_adapter.map(d => Map( "sifive,dtim" -> d.device.asProperty)).getOrElse(Nil) val itimProperty = frontend.icache.itimProperty.toSeq.flatMap(p => Map("sifive,itim" -> p)) val beuProperty = bus_error_unit.map(d => Map( "sifive,buserror" -> d.device.asProperty)).getOrElse(Nil) val cpuDevice: SimpleDevice = new SimpleDevice("cpu", Seq("sifive,rocket0", "riscv")) { override def parent = Some(ResourceAnchors.cpus) override def describe(resources: ResourceBindings): Description = { val Description(name, mapping) = super.describe(resources) Description(name, mapping ++ cpuProperties ++ nextLevelCacheProperty ++ tileProperties ++ dtimProperty ++ itimProperty ++ beuProperty) } } val vector_unit = rocketParams.core.vector.map(v => LazyModule(v.build(p))) vector_unit.foreach(vu => tlMasterXbar.node :=* vu.atlNode) vector_unit.foreach(vu => tlOtherMastersNode :=* vu.tlNode) ResourceBinding { Resource(cpuDevice, "reg").bind(ResourceAddress(tileId)) } override lazy val module = new RocketTileModuleImp(this) override def makeMasterBoundaryBuffers(crossing: ClockCrossingType)(implicit p: Parameters) = (rocketParams.boundaryBuffers, crossing) match { case (Some(RocketTileBoundaryBufferParams(true )), _) => TLBuffer() case (Some(RocketTileBoundaryBufferParams(false)), _: RationalCrossing) => TLBuffer(BufferParams.none, BufferParams.flow, BufferParams.none, BufferParams.flow, BufferParams(1)) case _ => TLBuffer(BufferParams.none) } override def makeSlaveBoundaryBuffers(crossing: ClockCrossingType)(implicit p: Parameters) = (rocketParams.boundaryBuffers, crossing) match { case (Some(RocketTileBoundaryBufferParams(true )), _) => TLBuffer() case (Some(RocketTileBoundaryBufferParams(false)), _: RationalCrossing) => TLBuffer(BufferParams.flow, BufferParams.none, BufferParams.none, BufferParams.none, BufferParams.none) case _ => TLBuffer(BufferParams.none) } } class RocketTileModuleImp(outer: RocketTile) extends BaseTileModuleImp(outer) with HasFpuOpt with HasLazyRoCCModule with HasICacheFrontendModule { Annotated.params(this, outer.rocketParams) val core = Module(new Rocket(outer)(outer.p)) outer.vector_unit.foreach { v => core.io.vector.get <> v.module.io.core v.module.io.tlb <> outer.dcache.module.io.tlb_port } // reset vector is connected in the Frontend to s2_pc core.io.reset_vector := DontCare // Report unrecoverable error conditions; for now the only cause is cache ECC errors outer.reportHalt(List(outer.dcache.module.io.errors)) // Report when the tile has ceased to retire instructions; for now the only cause is clock gating outer.reportCease(outer.rocketParams.core.clockGate.option( !outer.dcache.module.io.cpu.clock_enabled && !outer.frontend.module.io.cpu.clock_enabled && !ptw.io.dpath.clock_enabled && core.io.cease)) outer.reportWFI(Some(core.io.wfi)) outer.decodeCoreInterrupts(core.io.interrupts) // Decode the interrupt vector outer.bus_error_unit.foreach { beu => core.io.interrupts.buserror.get := beu.module.io.interrupt beu.module.io.errors.dcache := outer.dcache.module.io.errors beu.module.io.errors.icache := outer.frontend.module.io.errors } core.io.interrupts.nmi.foreach { nmi => nmi := outer.nmiSinkNode.get.bundle } // Pass through various 
external constants and reports that were bundle-bridged into the tile outer.traceSourceNode.bundle <> core.io.trace core.io.traceStall := outer.traceAuxSinkNode.bundle.stall outer.bpwatchSourceNode.bundle <> core.io.bpwatch core.io.hartid := outer.hartIdSinkNode.bundle require(core.io.hartid.getWidth >= outer.hartIdSinkNode.bundle.getWidth, s"core hartid wire (${core.io.hartid.getWidth}b) truncates external hartid wire (${outer.hartIdSinkNode.bundle.getWidth}b)") // Connect the core pipeline to other intra-tile modules outer.frontend.module.io.cpu <> core.io.imem dcachePorts += core.io.dmem // TODO outer.dcachePorts += () => module.core.io.dmem ?? fpuOpt foreach { fpu => core.io.fpu :<>= fpu.io.waiveAs[FPUCoreIO](_.cp_req, _.cp_resp) } if (fpuOpt.isEmpty) { core.io.fpu := DontCare } outer.vector_unit foreach { v => if (outer.rocketParams.core.vector.get.useDCache) { dcachePorts += v.module.io.dmem } else { v.module.io.dmem := DontCare } } core.io.ptw <> ptw.io.dpath // Connect the coprocessor interfaces if (outer.roccs.size > 0) { cmdRouter.get.io.in <> core.io.rocc.cmd outer.roccs.foreach{ lm => lm.module.io.exception := core.io.rocc.exception lm.module.io.fpu_req.ready := DontCare lm.module.io.fpu_resp.valid := DontCare lm.module.io.fpu_resp.bits.data := DontCare lm.module.io.fpu_resp.bits.exc := DontCare } core.io.rocc.resp <> respArb.get.io.out core.io.rocc.busy <> (cmdRouter.get.io.busy || outer.roccs.map(_.module.io.busy).reduce(_ || _)) core.io.rocc.interrupt := outer.roccs.map(_.module.io.interrupt).reduce(_ || _) (core.io.rocc.csrs zip roccCSRIOs.flatten).foreach { t => t._2 <> t._1 } } else { // tie off core.io.rocc.cmd.ready := false.B core.io.rocc.resp.valid := false.B core.io.rocc.resp.bits := DontCare core.io.rocc.busy := DontCare core.io.rocc.interrupt := DontCare } // Dont care mem since not all RoCC need accessing memory core.io.rocc.mem := DontCare // Rocket has higher priority to DTIM than other TileLink clients outer.dtim_adapter.foreach { lm => dcachePorts += lm.module.io.dmem } // TODO eliminate this redundancy val h = dcachePorts.size val c = core.dcacheArbPorts val o = outer.nDCachePorts require(h == c, s"port list size was $h, core expected $c") require(h == o, s"port list size was $h, outer counted $o") // TODO figure out how to move the below into their respective mix-ins dcacheArb.io.requestor <> dcachePorts.toSeq ptw.io.requestor <> ptwPorts.toSeq } trait HasFpuOpt { this: RocketTileModuleImp => val fpuOpt = outer.tileParams.core.fpu.map(params => Module(new FPU(params)(outer.p))) fpuOpt.foreach { fpu => val nRoCCFPUPorts = outer.roccs.count(_.usesFPU) val nFPUPorts = nRoCCFPUPorts + outer.rocketParams.core.useVector.toInt if (nFPUPorts > 0) { val fpArb = Module(new InOrderArbiter(new FPInput()(outer.p), new FPResult()(outer.p), nFPUPorts)) fpu.io.cp_req <> fpArb.io.out_req fpArb.io.out_resp <> fpu.io.cp_resp val fp_rocc_ios = outer.roccs.filter(_.usesFPU).map(_.module.io) for (i <- 0 until nRoCCFPUPorts) { fpArb.io.in_req(i) <> fp_rocc_ios(i).fpu_req fp_rocc_ios(i).fpu_resp <> fpArb.io.in_resp(i) } outer.vector_unit.foreach(vu => { fpArb.io.in_req(nRoCCFPUPorts) <> vu.module.io.fp_req vu.module.io.fp_resp <> fpArb.io.in_resp(nRoCCFPUPorts) }) } else { fpu.io.cp_req.valid := false.B fpu.io.cp_req.bits := DontCare fpu.io.cp_resp.ready := false.B } } } File BundleBridgeNexus.scala: package org.chipsalliance.diplomacy.bundlebridge import chisel3.{chiselTypeOf, ActualDirection, Data, Reg} import chisel3.reflect.DataMirror import 
org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyRawModuleImp} class BundleBridgeNexus[T <: Data]( inputFn: Seq[T] => T, outputFn: (T, Int) => Seq[T], default: Option[() => T] = None, inputRequiresOutput: Boolean = false, override val shouldBeInlined: Boolean = true )( implicit p: Parameters) extends LazyModule { val node = BundleBridgeNexusNode[T](default, inputRequiresOutput) lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { val defaultWireOpt = default.map(_()) val inputs: Seq[T] = node.in.map(_._1) inputs.foreach { i => require( DataMirror.checkTypeEquivalence(i, inputs.head), s"${node.context} requires all inputs have equivalent Chisel Data types, but got\n$i\nvs\n${inputs.head}" ) } inputs.flatMap(getElements).foreach { elt => DataMirror.directionOf(elt) match { case ActualDirection.Output => () case ActualDirection.Unspecified => () case _ => require(false, s"${node.context} can only be used with Output-directed Bundles") } } val outputs: Seq[T] = if (node.out.size > 0) { val broadcast: T = if (inputs.size >= 1) inputFn(inputs) else defaultWireOpt.get outputFn(broadcast, node.out.size) } else { Nil } val typeName = outputs.headOption.map(_.typeName).getOrElse("NoOutput") override def desiredName = s"BundleBridgeNexus_$typeName" node.out.map(_._1).foreach { o => require( DataMirror.checkTypeEquivalence(o, outputs.head), s"${node.context} requires all outputs have equivalent Chisel Data types, but got\n$o\nvs\n${outputs.head}" ) } require( outputs.size == node.out.size, s"${node.context} outputFn must generate one output wire per edgeOut, but got ${outputs.size} vs ${node.out.size}" ) node.out.zip(outputs).foreach { case ((out, _), bcast) => out := bcast } } } object BundleBridgeNexus { def safeRegNext[T <: Data](x: T): T = { val reg = Reg(chiselTypeOf(x)) reg := x reg } def requireOne[T <: Data](registered: Boolean)(seq: Seq[T]): T = { require(seq.size == 1, "BundleBroadcast default requires one input") if (registered) safeRegNext(seq.head) else seq.head } def orReduction[T <: Data](registered: Boolean)(seq: Seq[T]): T = { val x = seq.reduce((a, b) => (a.asUInt | b.asUInt).asTypeOf(seq.head)) if (registered) safeRegNext(x) else x } def fillN[T <: Data](registered: Boolean)(x: T, n: Int): Seq[T] = Seq.fill(n) { if (registered) safeRegNext(x) else x } def apply[T <: Data]( inputFn: Seq[T] => T = orReduction[T](false) _, outputFn: (T, Int) => Seq[T] = fillN[T](false) _, default: Option[() => T] = None, inputRequiresOutput: Boolean = false, shouldBeInlined: Boolean = true )( implicit p: Parameters ): BundleBridgeNexusNode[T] = { val nexus = LazyModule(new BundleBridgeNexus[T](inputFn, outputFn, default, inputRequiresOutput, shouldBeInlined)) nexus.node } }
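A minimal usage sketch (not one of the source files above), assuming the companion BundleBridgeSource and BundleBridgeSink helpers from the same bundlebridge package: it fans a single value out through a BundleBridgeNexus to two sinks, the same pattern used to deliver per-tile constants such as the hart id to RocketTile's hartIdSinkNode. The HartIdFanout name and the 3-bit width are illustrative only.

package org.chipsalliance.diplomacy.bundlebridge

import chisel3._
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyModuleImp}

// Hypothetical example module: broadcast one 3-bit value to two consumers.
class HartIdFanout(implicit p: Parameters) extends LazyModule {
  // Source node driven by the hardware below.
  val source = BundleBridgeSource(() => UInt(3.W))
  // Nexus configured to require exactly one input and replicate it to every output.
  val nexus = BundleBridgeNexus[UInt](
    inputFn  = BundleBridgeNexus.requireOne[UInt](false) _,
    outputFn = BundleBridgeNexus.fillN[UInt](false) _)
  // Two consumers of the broadcast value.
  val sinks = Seq.fill(2)(BundleBridgeSink[UInt]())

  nexus := source
  sinks.foreach { _ := nexus }

  lazy val module = new LazyModuleImp(this) {
    source.bundle := 5.U // drive the value being broadcast
    // After elaboration each sink observes the same value: sinks(0).bundle and sinks(1).bundle read 5.U.
  }
}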
module RocketTile_4( // @[RocketTile.scala:141:7] input clock, // @[RocketTile.scala:141:7] input reset, // @[RocketTile.scala:141:7] input auto_buffer_out_a_ready, // @[LazyModuleImp.scala:107:25] output auto_buffer_out_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_buffer_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_buffer_out_a_bits_param, // @[LazyModuleImp.scala:107:25] output [3:0] auto_buffer_out_a_bits_size, // @[LazyModuleImp.scala:107:25] output [1:0] auto_buffer_out_a_bits_source, // @[LazyModuleImp.scala:107:25] output [31:0] auto_buffer_out_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_buffer_out_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_buffer_out_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_buffer_out_b_ready, // @[LazyModuleImp.scala:107:25] input auto_buffer_out_b_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_buffer_out_b_bits_opcode, // @[LazyModuleImp.scala:107:25] input [1:0] auto_buffer_out_b_bits_param, // @[LazyModuleImp.scala:107:25] input [3:0] auto_buffer_out_b_bits_size, // @[LazyModuleImp.scala:107:25] input [1:0] auto_buffer_out_b_bits_source, // @[LazyModuleImp.scala:107:25] input [31:0] auto_buffer_out_b_bits_address, // @[LazyModuleImp.scala:107:25] input [7:0] auto_buffer_out_b_bits_mask, // @[LazyModuleImp.scala:107:25] input [63:0] auto_buffer_out_b_bits_data, // @[LazyModuleImp.scala:107:25] input auto_buffer_out_b_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_buffer_out_c_ready, // @[LazyModuleImp.scala:107:25] output auto_buffer_out_c_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_buffer_out_c_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_buffer_out_c_bits_param, // @[LazyModuleImp.scala:107:25] output [3:0] auto_buffer_out_c_bits_size, // @[LazyModuleImp.scala:107:25] output [1:0] auto_buffer_out_c_bits_source, // @[LazyModuleImp.scala:107:25] output [31:0] auto_buffer_out_c_bits_address, // @[LazyModuleImp.scala:107:25] output [63:0] auto_buffer_out_c_bits_data, // @[LazyModuleImp.scala:107:25] output auto_buffer_out_d_ready, // @[LazyModuleImp.scala:107:25] input auto_buffer_out_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_buffer_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [1:0] auto_buffer_out_d_bits_param, // @[LazyModuleImp.scala:107:25] input [3:0] auto_buffer_out_d_bits_size, // @[LazyModuleImp.scala:107:25] input [1:0] auto_buffer_out_d_bits_source, // @[LazyModuleImp.scala:107:25] input [2:0] auto_buffer_out_d_bits_sink, // @[LazyModuleImp.scala:107:25] input auto_buffer_out_d_bits_denied, // @[LazyModuleImp.scala:107:25] input [63:0] auto_buffer_out_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_buffer_out_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_buffer_out_e_ready, // @[LazyModuleImp.scala:107:25] output auto_buffer_out_e_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_buffer_out_e_bits_sink, // @[LazyModuleImp.scala:107:25] output auto_wfi_out_0, // @[LazyModuleImp.scala:107:25] input auto_int_local_in_3_0, // @[LazyModuleImp.scala:107:25] input auto_int_local_in_2_0, // @[LazyModuleImp.scala:107:25] input auto_int_local_in_1_0, // @[LazyModuleImp.scala:107:25] input auto_int_local_in_1_1, // @[LazyModuleImp.scala:107:25] input auto_int_local_in_0_0, // @[LazyModuleImp.scala:107:25] output auto_trace_source_out_insns_0_valid, // @[LazyModuleImp.scala:107:25] output [39:0] auto_trace_source_out_insns_0_iaddr, // 
@[LazyModuleImp.scala:107:25] output [31:0] auto_trace_source_out_insns_0_insn, // @[LazyModuleImp.scala:107:25] output [2:0] auto_trace_source_out_insns_0_priv, // @[LazyModuleImp.scala:107:25] output auto_trace_source_out_insns_0_exception, // @[LazyModuleImp.scala:107:25] output auto_trace_source_out_insns_0_interrupt, // @[LazyModuleImp.scala:107:25] output [63:0] auto_trace_source_out_insns_0_cause, // @[LazyModuleImp.scala:107:25] output [39:0] auto_trace_source_out_insns_0_tval, // @[LazyModuleImp.scala:107:25] output [63:0] auto_trace_source_out_time, // @[LazyModuleImp.scala:107:25] input [2:0] auto_hartid_in // @[LazyModuleImp.scala:107:25] ); wire buffer_auto_in_e_valid; // @[Buffer.scala:40:9] wire buffer_auto_in_e_ready; // @[Buffer.scala:40:9] wire [2:0] buffer_auto_in_e_bits_sink; // @[Buffer.scala:40:9] wire buffer_auto_in_d_valid; // @[Buffer.scala:40:9] wire buffer_auto_in_d_ready; // @[Buffer.scala:40:9] wire buffer_auto_in_d_bits_corrupt; // @[Buffer.scala:40:9] wire [63:0] buffer_auto_in_d_bits_data; // @[Buffer.scala:40:9] wire buffer_auto_in_d_bits_denied; // @[Buffer.scala:40:9] wire [2:0] buffer_auto_in_d_bits_sink; // @[Buffer.scala:40:9] wire [1:0] buffer_auto_in_d_bits_source; // @[Buffer.scala:40:9] wire [3:0] buffer_auto_in_d_bits_size; // @[Buffer.scala:40:9] wire [1:0] buffer_auto_in_d_bits_param; // @[Buffer.scala:40:9] wire [2:0] buffer_auto_in_d_bits_opcode; // @[Buffer.scala:40:9] wire buffer_auto_in_c_valid; // @[Buffer.scala:40:9] wire buffer_auto_in_c_ready; // @[Buffer.scala:40:9] wire [63:0] buffer_auto_in_c_bits_data; // @[Buffer.scala:40:9] wire [31:0] buffer_auto_in_c_bits_address; // @[Buffer.scala:40:9] wire [1:0] buffer_auto_in_c_bits_source; // @[Buffer.scala:40:9] wire [3:0] buffer_auto_in_c_bits_size; // @[Buffer.scala:40:9] wire [2:0] buffer_auto_in_c_bits_param; // @[Buffer.scala:40:9] wire [2:0] buffer_auto_in_c_bits_opcode; // @[Buffer.scala:40:9] wire buffer_auto_in_b_valid; // @[Buffer.scala:40:9] wire buffer_auto_in_b_ready; // @[Buffer.scala:40:9] wire buffer_auto_in_b_bits_corrupt; // @[Buffer.scala:40:9] wire [63:0] buffer_auto_in_b_bits_data; // @[Buffer.scala:40:9] wire [7:0] buffer_auto_in_b_bits_mask; // @[Buffer.scala:40:9] wire [31:0] buffer_auto_in_b_bits_address; // @[Buffer.scala:40:9] wire [1:0] buffer_auto_in_b_bits_source; // @[Buffer.scala:40:9] wire [3:0] buffer_auto_in_b_bits_size; // @[Buffer.scala:40:9] wire [1:0] buffer_auto_in_b_bits_param; // @[Buffer.scala:40:9] wire [2:0] buffer_auto_in_b_bits_opcode; // @[Buffer.scala:40:9] wire buffer_auto_in_a_valid; // @[Buffer.scala:40:9] wire buffer_auto_in_a_ready; // @[Buffer.scala:40:9] wire [63:0] buffer_auto_in_a_bits_data; // @[Buffer.scala:40:9] wire [7:0] buffer_auto_in_a_bits_mask; // @[Buffer.scala:40:9] wire [31:0] buffer_auto_in_a_bits_address; // @[Buffer.scala:40:9] wire [1:0] buffer_auto_in_a_bits_source; // @[Buffer.scala:40:9] wire [3:0] buffer_auto_in_a_bits_size; // @[Buffer.scala:40:9] wire [2:0] buffer_auto_in_a_bits_param; // @[Buffer.scala:40:9] wire [2:0] buffer_auto_in_a_bits_opcode; // @[Buffer.scala:40:9] wire widget_1_auto_anon_out_d_valid; // @[WidthWidget.scala:27:9] wire widget_1_auto_anon_out_d_bits_corrupt; // @[WidthWidget.scala:27:9] wire [63:0] widget_1_auto_anon_out_d_bits_data; // @[WidthWidget.scala:27:9] wire widget_1_auto_anon_out_d_bits_denied; // @[WidthWidget.scala:27:9] wire [2:0] widget_1_auto_anon_out_d_bits_sink; // @[WidthWidget.scala:27:9] wire [3:0] widget_1_auto_anon_out_d_bits_size; // @[WidthWidget.scala:27:9] wire 
[1:0] widget_1_auto_anon_out_d_bits_param; // @[WidthWidget.scala:27:9] wire [2:0] widget_1_auto_anon_out_d_bits_opcode; // @[WidthWidget.scala:27:9] wire widget_1_auto_anon_out_a_ready; // @[WidthWidget.scala:27:9] wire widget_1_auto_anon_in_a_valid; // @[WidthWidget.scala:27:9] wire [31:0] widget_1_auto_anon_in_a_bits_address; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_e_ready; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_d_valid; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_d_bits_corrupt; // @[WidthWidget.scala:27:9] wire [63:0] widget_auto_anon_out_d_bits_data; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_d_bits_denied; // @[WidthWidget.scala:27:9] wire [2:0] widget_auto_anon_out_d_bits_sink; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_d_bits_source; // @[WidthWidget.scala:27:9] wire [3:0] widget_auto_anon_out_d_bits_size; // @[WidthWidget.scala:27:9] wire [1:0] widget_auto_anon_out_d_bits_param; // @[WidthWidget.scala:27:9] wire [2:0] widget_auto_anon_out_d_bits_opcode; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_c_ready; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_b_valid; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_b_bits_corrupt; // @[WidthWidget.scala:27:9] wire [63:0] widget_auto_anon_out_b_bits_data; // @[WidthWidget.scala:27:9] wire [7:0] widget_auto_anon_out_b_bits_mask; // @[WidthWidget.scala:27:9] wire [31:0] widget_auto_anon_out_b_bits_address; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_b_bits_source; // @[WidthWidget.scala:27:9] wire [3:0] widget_auto_anon_out_b_bits_size; // @[WidthWidget.scala:27:9] wire [1:0] widget_auto_anon_out_b_bits_param; // @[WidthWidget.scala:27:9] wire [2:0] widget_auto_anon_out_b_bits_opcode; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_a_ready; // @[WidthWidget.scala:27:9] wire widget_auto_anon_in_e_valid; // @[WidthWidget.scala:27:9] wire [2:0] widget_auto_anon_in_e_bits_sink; // @[WidthWidget.scala:27:9] wire widget_auto_anon_in_d_ready; // @[WidthWidget.scala:27:9] wire widget_auto_anon_in_c_valid; // @[WidthWidget.scala:27:9] wire [63:0] widget_auto_anon_in_c_bits_data; // @[WidthWidget.scala:27:9] wire [31:0] widget_auto_anon_in_c_bits_address; // @[WidthWidget.scala:27:9] wire widget_auto_anon_in_c_bits_source; // @[WidthWidget.scala:27:9] wire [3:0] widget_auto_anon_in_c_bits_size; // @[WidthWidget.scala:27:9] wire [2:0] widget_auto_anon_in_c_bits_param; // @[WidthWidget.scala:27:9] wire [2:0] widget_auto_anon_in_c_bits_opcode; // @[WidthWidget.scala:27:9] wire widget_auto_anon_in_b_ready; // @[WidthWidget.scala:27:9] wire widget_auto_anon_in_a_valid; // @[WidthWidget.scala:27:9] wire [63:0] widget_auto_anon_in_a_bits_data; // @[WidthWidget.scala:27:9] wire [7:0] widget_auto_anon_in_a_bits_mask; // @[WidthWidget.scala:27:9] wire [31:0] widget_auto_anon_in_a_bits_address; // @[WidthWidget.scala:27:9] wire widget_auto_anon_in_a_bits_source; // @[WidthWidget.scala:27:9] wire [3:0] widget_auto_anon_in_a_bits_size; // @[WidthWidget.scala:27:9] wire [2:0] widget_auto_anon_in_a_bits_param; // @[WidthWidget.scala:27:9] wire [2:0] widget_auto_anon_in_a_bits_opcode; // @[WidthWidget.scala:27:9] wire [2:0] broadcast_2_auto_in_0_action; // @[BundleBridgeNexus.scala:20:9] wire broadcast_2_auto_in_0_valid_0; // @[BundleBridgeNexus.scala:20:9] wire [2:0] broadcast_auto_in; // @[BundleBridgeNexus.scala:20:9] wire _core_io_imem_might_request; // @[RocketTile.scala:147:20] wire _core_io_imem_req_valid; // @[RocketTile.scala:147:20] wire [39:0] 
_core_io_imem_req_bits_pc; // @[RocketTile.scala:147:20] wire _core_io_imem_req_bits_speculative; // @[RocketTile.scala:147:20] wire _core_io_imem_sfence_valid; // @[RocketTile.scala:147:20] wire _core_io_imem_sfence_bits_rs1; // @[RocketTile.scala:147:20] wire _core_io_imem_sfence_bits_rs2; // @[RocketTile.scala:147:20] wire [38:0] _core_io_imem_sfence_bits_addr; // @[RocketTile.scala:147:20] wire _core_io_imem_sfence_bits_asid; // @[RocketTile.scala:147:20] wire _core_io_imem_sfence_bits_hv; // @[RocketTile.scala:147:20] wire _core_io_imem_sfence_bits_hg; // @[RocketTile.scala:147:20] wire _core_io_imem_resp_ready; // @[RocketTile.scala:147:20] wire _core_io_imem_btb_update_valid; // @[RocketTile.scala:147:20] wire [1:0] _core_io_imem_btb_update_bits_prediction_cfiType; // @[RocketTile.scala:147:20] wire _core_io_imem_btb_update_bits_prediction_taken; // @[RocketTile.scala:147:20] wire [1:0] _core_io_imem_btb_update_bits_prediction_mask; // @[RocketTile.scala:147:20] wire _core_io_imem_btb_update_bits_prediction_bridx; // @[RocketTile.scala:147:20] wire [38:0] _core_io_imem_btb_update_bits_prediction_target; // @[RocketTile.scala:147:20] wire [4:0] _core_io_imem_btb_update_bits_prediction_entry; // @[RocketTile.scala:147:20] wire [7:0] _core_io_imem_btb_update_bits_prediction_bht_history; // @[RocketTile.scala:147:20] wire _core_io_imem_btb_update_bits_prediction_bht_value; // @[RocketTile.scala:147:20] wire [38:0] _core_io_imem_btb_update_bits_pc; // @[RocketTile.scala:147:20] wire [38:0] _core_io_imem_btb_update_bits_target; // @[RocketTile.scala:147:20] wire _core_io_imem_btb_update_bits_isValid; // @[RocketTile.scala:147:20] wire [38:0] _core_io_imem_btb_update_bits_br_pc; // @[RocketTile.scala:147:20] wire [1:0] _core_io_imem_btb_update_bits_cfiType; // @[RocketTile.scala:147:20] wire _core_io_imem_bht_update_valid; // @[RocketTile.scala:147:20] wire [7:0] _core_io_imem_bht_update_bits_prediction_history; // @[RocketTile.scala:147:20] wire _core_io_imem_bht_update_bits_prediction_value; // @[RocketTile.scala:147:20] wire [38:0] _core_io_imem_bht_update_bits_pc; // @[RocketTile.scala:147:20] wire _core_io_imem_bht_update_bits_branch; // @[RocketTile.scala:147:20] wire _core_io_imem_bht_update_bits_taken; // @[RocketTile.scala:147:20] wire _core_io_imem_bht_update_bits_mispredict; // @[RocketTile.scala:147:20] wire _core_io_imem_flush_icache; // @[RocketTile.scala:147:20] wire _core_io_imem_progress; // @[RocketTile.scala:147:20] wire _core_io_dmem_req_valid; // @[RocketTile.scala:147:20] wire [39:0] _core_io_dmem_req_bits_addr; // @[RocketTile.scala:147:20] wire [6:0] _core_io_dmem_req_bits_tag; // @[RocketTile.scala:147:20] wire [4:0] _core_io_dmem_req_bits_cmd; // @[RocketTile.scala:147:20] wire [1:0] _core_io_dmem_req_bits_size; // @[RocketTile.scala:147:20] wire _core_io_dmem_req_bits_signed; // @[RocketTile.scala:147:20] wire [1:0] _core_io_dmem_req_bits_dprv; // @[RocketTile.scala:147:20] wire _core_io_dmem_req_bits_dv; // @[RocketTile.scala:147:20] wire _core_io_dmem_req_bits_no_resp; // @[RocketTile.scala:147:20] wire _core_io_dmem_s1_kill; // @[RocketTile.scala:147:20] wire [63:0] _core_io_dmem_s1_data_data; // @[RocketTile.scala:147:20] wire _core_io_dmem_keep_clock_enabled; // @[RocketTile.scala:147:20] wire [3:0] _core_io_ptw_ptbr_mode; // @[RocketTile.scala:147:20] wire [43:0] _core_io_ptw_ptbr_ppn; // @[RocketTile.scala:147:20] wire _core_io_ptw_sfence_valid; // @[RocketTile.scala:147:20] wire _core_io_ptw_sfence_bits_rs1; // @[RocketTile.scala:147:20] wire 
_core_io_ptw_sfence_bits_rs2; // @[RocketTile.scala:147:20] wire [38:0] _core_io_ptw_sfence_bits_addr; // @[RocketTile.scala:147:20] wire _core_io_ptw_sfence_bits_asid; // @[RocketTile.scala:147:20] wire _core_io_ptw_sfence_bits_hv; // @[RocketTile.scala:147:20] wire _core_io_ptw_sfence_bits_hg; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_debug; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_cease; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_wfi; // @[RocketTile.scala:147:20] wire [31:0] _core_io_ptw_status_isa; // @[RocketTile.scala:147:20] wire [1:0] _core_io_ptw_status_dprv; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_dv; // @[RocketTile.scala:147:20] wire [1:0] _core_io_ptw_status_prv; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_v; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_sd; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_mpv; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_gva; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_tsr; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_tw; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_tvm; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_mxr; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_sum; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_mprv; // @[RocketTile.scala:147:20] wire [1:0] _core_io_ptw_status_fs; // @[RocketTile.scala:147:20] wire [1:0] _core_io_ptw_status_mpp; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_spp; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_mpie; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_spie; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_mie; // @[RocketTile.scala:147:20] wire _core_io_ptw_status_sie; // @[RocketTile.scala:147:20] wire _core_io_ptw_hstatus_spvp; // @[RocketTile.scala:147:20] wire _core_io_ptw_hstatus_spv; // @[RocketTile.scala:147:20] wire _core_io_ptw_hstatus_gva; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_debug; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_cease; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_wfi; // @[RocketTile.scala:147:20] wire [31:0] _core_io_ptw_gstatus_isa; // @[RocketTile.scala:147:20] wire [1:0] _core_io_ptw_gstatus_dprv; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_dv; // @[RocketTile.scala:147:20] wire [1:0] _core_io_ptw_gstatus_prv; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_v; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_sd; // @[RocketTile.scala:147:20] wire [22:0] _core_io_ptw_gstatus_zero2; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_mpv; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_gva; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_mbe; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_sbe; // @[RocketTile.scala:147:20] wire [1:0] _core_io_ptw_gstatus_sxl; // @[RocketTile.scala:147:20] wire [7:0] _core_io_ptw_gstatus_zero1; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_tsr; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_tw; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_tvm; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_mxr; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_sum; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_mprv; // @[RocketTile.scala:147:20] wire [1:0] _core_io_ptw_gstatus_fs; // @[RocketTile.scala:147:20] wire [1:0] _core_io_ptw_gstatus_mpp; // @[RocketTile.scala:147:20] wire [1:0] _core_io_ptw_gstatus_vs; // 
@[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_spp; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_mpie; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_ube; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_spie; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_upie; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_mie; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_hie; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_sie; // @[RocketTile.scala:147:20] wire _core_io_ptw_gstatus_uie; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_0_cfg_l; // @[RocketTile.scala:147:20] wire [1:0] _core_io_ptw_pmp_0_cfg_a; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_0_cfg_x; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_0_cfg_w; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_0_cfg_r; // @[RocketTile.scala:147:20] wire [29:0] _core_io_ptw_pmp_0_addr; // @[RocketTile.scala:147:20] wire [31:0] _core_io_ptw_pmp_0_mask; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_1_cfg_l; // @[RocketTile.scala:147:20] wire [1:0] _core_io_ptw_pmp_1_cfg_a; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_1_cfg_x; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_1_cfg_w; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_1_cfg_r; // @[RocketTile.scala:147:20] wire [29:0] _core_io_ptw_pmp_1_addr; // @[RocketTile.scala:147:20] wire [31:0] _core_io_ptw_pmp_1_mask; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_2_cfg_l; // @[RocketTile.scala:147:20] wire [1:0] _core_io_ptw_pmp_2_cfg_a; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_2_cfg_x; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_2_cfg_w; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_2_cfg_r; // @[RocketTile.scala:147:20] wire [29:0] _core_io_ptw_pmp_2_addr; // @[RocketTile.scala:147:20] wire [31:0] _core_io_ptw_pmp_2_mask; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_3_cfg_l; // @[RocketTile.scala:147:20] wire [1:0] _core_io_ptw_pmp_3_cfg_a; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_3_cfg_x; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_3_cfg_w; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_3_cfg_r; // @[RocketTile.scala:147:20] wire [29:0] _core_io_ptw_pmp_3_addr; // @[RocketTile.scala:147:20] wire [31:0] _core_io_ptw_pmp_3_mask; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_4_cfg_l; // @[RocketTile.scala:147:20] wire [1:0] _core_io_ptw_pmp_4_cfg_a; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_4_cfg_x; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_4_cfg_w; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_4_cfg_r; // @[RocketTile.scala:147:20] wire [29:0] _core_io_ptw_pmp_4_addr; // @[RocketTile.scala:147:20] wire [31:0] _core_io_ptw_pmp_4_mask; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_5_cfg_l; // @[RocketTile.scala:147:20] wire [1:0] _core_io_ptw_pmp_5_cfg_a; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_5_cfg_x; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_5_cfg_w; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_5_cfg_r; // @[RocketTile.scala:147:20] wire [29:0] _core_io_ptw_pmp_5_addr; // @[RocketTile.scala:147:20] wire [31:0] _core_io_ptw_pmp_5_mask; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_6_cfg_l; // @[RocketTile.scala:147:20] wire [1:0] _core_io_ptw_pmp_6_cfg_a; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_6_cfg_x; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_6_cfg_w; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_6_cfg_r; // 
@[RocketTile.scala:147:20] wire [29:0] _core_io_ptw_pmp_6_addr; // @[RocketTile.scala:147:20] wire [31:0] _core_io_ptw_pmp_6_mask; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_7_cfg_l; // @[RocketTile.scala:147:20] wire [1:0] _core_io_ptw_pmp_7_cfg_a; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_7_cfg_x; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_7_cfg_w; // @[RocketTile.scala:147:20] wire _core_io_ptw_pmp_7_cfg_r; // @[RocketTile.scala:147:20] wire [29:0] _core_io_ptw_pmp_7_addr; // @[RocketTile.scala:147:20] wire [31:0] _core_io_ptw_pmp_7_mask; // @[RocketTile.scala:147:20] wire _core_io_ptw_customCSRs_csrs_0_ren; // @[RocketTile.scala:147:20] wire _core_io_ptw_customCSRs_csrs_0_wen; // @[RocketTile.scala:147:20] wire [63:0] _core_io_ptw_customCSRs_csrs_0_wdata; // @[RocketTile.scala:147:20] wire [63:0] _core_io_ptw_customCSRs_csrs_0_value; // @[RocketTile.scala:147:20] wire _core_io_ptw_customCSRs_csrs_1_ren; // @[RocketTile.scala:147:20] wire _core_io_ptw_customCSRs_csrs_1_wen; // @[RocketTile.scala:147:20] wire [63:0] _core_io_ptw_customCSRs_csrs_1_wdata; // @[RocketTile.scala:147:20] wire [63:0] _core_io_ptw_customCSRs_csrs_1_value; // @[RocketTile.scala:147:20] wire _core_io_ptw_customCSRs_csrs_2_ren; // @[RocketTile.scala:147:20] wire _core_io_ptw_customCSRs_csrs_2_wen; // @[RocketTile.scala:147:20] wire [63:0] _core_io_ptw_customCSRs_csrs_2_wdata; // @[RocketTile.scala:147:20] wire [63:0] _core_io_ptw_customCSRs_csrs_2_value; // @[RocketTile.scala:147:20] wire _core_io_ptw_customCSRs_csrs_3_ren; // @[RocketTile.scala:147:20] wire _core_io_ptw_customCSRs_csrs_3_wen; // @[RocketTile.scala:147:20] wire [63:0] _core_io_ptw_customCSRs_csrs_3_wdata; // @[RocketTile.scala:147:20] wire [63:0] _core_io_ptw_customCSRs_csrs_3_value; // @[RocketTile.scala:147:20] wire [2:0] _core_io_fpu_hartid; // @[RocketTile.scala:147:20] wire [63:0] _core_io_fpu_time; // @[RocketTile.scala:147:20] wire [31:0] _core_io_fpu_inst; // @[RocketTile.scala:147:20] wire [63:0] _core_io_fpu_fromint_data; // @[RocketTile.scala:147:20] wire [2:0] _core_io_fpu_fcsr_rm; // @[RocketTile.scala:147:20] wire _core_io_fpu_ll_resp_val; // @[RocketTile.scala:147:20] wire [2:0] _core_io_fpu_ll_resp_type; // @[RocketTile.scala:147:20] wire [4:0] _core_io_fpu_ll_resp_tag; // @[RocketTile.scala:147:20] wire [63:0] _core_io_fpu_ll_resp_data; // @[RocketTile.scala:147:20] wire _core_io_fpu_valid; // @[RocketTile.scala:147:20] wire _core_io_fpu_killx; // @[RocketTile.scala:147:20] wire _core_io_fpu_killm; // @[RocketTile.scala:147:20] wire _core_io_fpu_keep_clock_enabled; // @[RocketTile.scala:147:20] wire _core_io_wfi; // @[RocketTile.scala:147:20] wire _ptw_io_requestor_0_req_ready; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_valid; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_bits_ae_ptw; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_bits_ae_final; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_bits_pf; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_bits_gf; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_bits_hr; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_bits_hw; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_bits_hx; // @[PTW.scala:802:19] wire [9:0] _ptw_io_requestor_0_resp_bits_pte_reserved_for_future; // @[PTW.scala:802:19] wire [43:0] _ptw_io_requestor_0_resp_bits_pte_ppn; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_resp_bits_pte_reserved_for_software; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_bits_pte_d; // 
@[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_bits_pte_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_bits_pte_g; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_bits_pte_u; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_bits_pte_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_bits_pte_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_bits_pte_r; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_bits_pte_v; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_resp_bits_level; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_bits_homogeneous; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_bits_gpa_valid; // @[PTW.scala:802:19] wire [38:0] _ptw_io_requestor_0_resp_bits_gpa_bits; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_resp_bits_gpa_is_pte; // @[PTW.scala:802:19] wire [3:0] _ptw_io_requestor_0_ptbr_mode; // @[PTW.scala:802:19] wire [43:0] _ptw_io_requestor_0_ptbr_ppn; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_debug; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_cease; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_wfi; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_0_status_isa; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_status_dprv; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_dv; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_status_prv; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_v; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_sd; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_mpv; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_gva; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_tsr; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_tw; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_tvm; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_mxr; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_sum; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_mprv; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_status_fs; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_status_mpp; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_spp; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_mpie; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_spie; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_mie; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_status_sie; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_hstatus_spvp; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_hstatus_spv; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_hstatus_gva; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_debug; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_cease; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_wfi; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_0_gstatus_isa; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_gstatus_dprv; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_dv; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_gstatus_prv; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_v; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_sd; // @[PTW.scala:802:19] wire [22:0] _ptw_io_requestor_0_gstatus_zero2; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_mpv; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_gva; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_mbe; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_sbe; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_gstatus_sxl; // 
@[PTW.scala:802:19] wire [7:0] _ptw_io_requestor_0_gstatus_zero1; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_tsr; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_tw; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_tvm; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_mxr; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_sum; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_mprv; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_gstatus_fs; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_gstatus_mpp; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_gstatus_vs; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_spp; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_mpie; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_ube; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_spie; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_upie; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_mie; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_hie; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_sie; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_gstatus_uie; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_0_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_pmp_0_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_0_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_0_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_0_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_0_pmp_0_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_0_pmp_0_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_1_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_pmp_1_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_1_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_1_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_1_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_0_pmp_1_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_0_pmp_1_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_2_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_pmp_2_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_2_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_2_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_2_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_0_pmp_2_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_0_pmp_2_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_3_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_pmp_3_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_3_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_3_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_3_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_0_pmp_3_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_0_pmp_3_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_4_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_pmp_4_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_4_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_4_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_4_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_0_pmp_4_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_0_pmp_4_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_5_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_pmp_5_cfg_a; // 
@[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_5_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_5_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_5_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_0_pmp_5_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_0_pmp_5_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_6_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_pmp_6_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_6_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_6_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_6_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_0_pmp_6_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_0_pmp_6_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_7_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_0_pmp_7_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_7_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_7_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_pmp_7_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_0_pmp_7_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_0_pmp_7_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_customCSRs_csrs_0_ren; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_customCSRs_csrs_0_wen; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_0_customCSRs_csrs_0_wdata; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_0_customCSRs_csrs_0_value; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_customCSRs_csrs_1_ren; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_customCSRs_csrs_1_wen; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_0_customCSRs_csrs_1_wdata; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_0_customCSRs_csrs_1_value; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_customCSRs_csrs_2_ren; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_customCSRs_csrs_2_wen; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_0_customCSRs_csrs_2_wdata; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_0_customCSRs_csrs_2_value; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_customCSRs_csrs_3_ren; // @[PTW.scala:802:19] wire _ptw_io_requestor_0_customCSRs_csrs_3_wen; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_0_customCSRs_csrs_3_wdata; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_0_customCSRs_csrs_3_value; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_req_ready; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_valid; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_bits_ae_ptw; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_bits_ae_final; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_bits_pf; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_bits_gf; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_bits_hr; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_bits_hw; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_bits_hx; // @[PTW.scala:802:19] wire [9:0] _ptw_io_requestor_1_resp_bits_pte_reserved_for_future; // @[PTW.scala:802:19] wire [43:0] _ptw_io_requestor_1_resp_bits_pte_ppn; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_resp_bits_pte_reserved_for_software; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_bits_pte_d; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_bits_pte_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_bits_pte_g; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_bits_pte_u; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_bits_pte_x; 
// @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_bits_pte_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_bits_pte_r; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_bits_pte_v; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_resp_bits_level; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_bits_homogeneous; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_bits_gpa_valid; // @[PTW.scala:802:19] wire [38:0] _ptw_io_requestor_1_resp_bits_gpa_bits; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_resp_bits_gpa_is_pte; // @[PTW.scala:802:19] wire [3:0] _ptw_io_requestor_1_ptbr_mode; // @[PTW.scala:802:19] wire [43:0] _ptw_io_requestor_1_ptbr_ppn; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_debug; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_cease; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_wfi; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_1_status_isa; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_status_dprv; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_dv; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_status_prv; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_v; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_sd; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_mpv; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_gva; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_tsr; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_tw; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_tvm; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_mxr; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_sum; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_mprv; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_status_fs; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_status_mpp; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_spp; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_mpie; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_spie; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_mie; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_status_sie; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_hstatus_spvp; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_hstatus_spv; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_hstatus_gva; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_debug; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_cease; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_wfi; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_1_gstatus_isa; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_gstatus_dprv; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_dv; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_gstatus_prv; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_v; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_sd; // @[PTW.scala:802:19] wire [22:0] _ptw_io_requestor_1_gstatus_zero2; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_mpv; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_gva; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_mbe; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_sbe; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_gstatus_sxl; // @[PTW.scala:802:19] wire [7:0] _ptw_io_requestor_1_gstatus_zero1; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_tsr; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_tw; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_tvm; // 
@[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_mxr; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_sum; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_mprv; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_gstatus_fs; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_gstatus_mpp; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_gstatus_vs; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_spp; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_mpie; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_ube; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_spie; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_upie; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_mie; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_hie; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_sie; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_gstatus_uie; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_0_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_pmp_0_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_0_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_0_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_0_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_1_pmp_0_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_1_pmp_0_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_1_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_pmp_1_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_1_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_1_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_1_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_1_pmp_1_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_1_pmp_1_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_2_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_pmp_2_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_2_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_2_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_2_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_1_pmp_2_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_1_pmp_2_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_3_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_pmp_3_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_3_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_3_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_3_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_1_pmp_3_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_1_pmp_3_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_4_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_pmp_4_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_4_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_4_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_4_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_1_pmp_4_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_1_pmp_4_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_5_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_pmp_5_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_5_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_5_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_5_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_1_pmp_5_addr; // 
@[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_1_pmp_5_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_6_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_pmp_6_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_6_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_6_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_6_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_1_pmp_6_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_1_pmp_6_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_7_cfg_l; // @[PTW.scala:802:19] wire [1:0] _ptw_io_requestor_1_pmp_7_cfg_a; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_7_cfg_x; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_7_cfg_w; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_pmp_7_cfg_r; // @[PTW.scala:802:19] wire [29:0] _ptw_io_requestor_1_pmp_7_addr; // @[PTW.scala:802:19] wire [31:0] _ptw_io_requestor_1_pmp_7_mask; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_customCSRs_csrs_0_ren; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_customCSRs_csrs_0_wen; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_1_customCSRs_csrs_0_wdata; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_1_customCSRs_csrs_0_value; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_customCSRs_csrs_1_ren; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_customCSRs_csrs_1_wen; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_1_customCSRs_csrs_1_wdata; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_1_customCSRs_csrs_1_value; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_customCSRs_csrs_2_ren; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_customCSRs_csrs_2_wen; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_1_customCSRs_csrs_2_wdata; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_1_customCSRs_csrs_2_value; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_customCSRs_csrs_3_ren; // @[PTW.scala:802:19] wire _ptw_io_requestor_1_customCSRs_csrs_3_wen; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_1_customCSRs_csrs_3_wdata; // @[PTW.scala:802:19] wire [63:0] _ptw_io_requestor_1_customCSRs_csrs_3_value; // @[PTW.scala:802:19] wire _ptw_io_mem_req_valid; // @[PTW.scala:802:19] wire [39:0] _ptw_io_mem_req_bits_addr; // @[PTW.scala:802:19] wire _ptw_io_mem_req_bits_dv; // @[PTW.scala:802:19] wire _ptw_io_mem_s1_kill; // @[PTW.scala:802:19] wire _ptw_io_dpath_perf_pte_miss; // @[PTW.scala:802:19] wire _ptw_io_dpath_perf_pte_hit; // @[PTW.scala:802:19] wire _ptw_io_dpath_clock_enabled; // @[PTW.scala:802:19] wire _dcacheArb_io_requestor_0_req_ready; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_s2_nack; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_s2_nack_cause_raw; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_s2_uncached; // @[HellaCache.scala:292:25] wire [31:0] _dcacheArb_io_requestor_0_s2_paddr; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_resp_valid; // @[HellaCache.scala:292:25] wire [39:0] _dcacheArb_io_requestor_0_resp_bits_addr; // @[HellaCache.scala:292:25] wire [6:0] _dcacheArb_io_requestor_0_resp_bits_tag; // @[HellaCache.scala:292:25] wire [4:0] _dcacheArb_io_requestor_0_resp_bits_cmd; // @[HellaCache.scala:292:25] wire [1:0] _dcacheArb_io_requestor_0_resp_bits_size; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_resp_bits_signed; // @[HellaCache.scala:292:25] wire [1:0] _dcacheArb_io_requestor_0_resp_bits_dprv; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_resp_bits_dv; 
// @[HellaCache.scala:292:25] wire [63:0] _dcacheArb_io_requestor_0_resp_bits_data; // @[HellaCache.scala:292:25] wire [7:0] _dcacheArb_io_requestor_0_resp_bits_mask; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_resp_bits_replay; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_resp_bits_has_data; // @[HellaCache.scala:292:25] wire [63:0] _dcacheArb_io_requestor_0_resp_bits_data_word_bypass; // @[HellaCache.scala:292:25] wire [63:0] _dcacheArb_io_requestor_0_resp_bits_data_raw; // @[HellaCache.scala:292:25] wire [63:0] _dcacheArb_io_requestor_0_resp_bits_store_data; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_replay_next; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_s2_xcpt_ma_ld; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_s2_xcpt_ma_st; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_s2_xcpt_pf_ld; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_s2_xcpt_pf_st; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_s2_xcpt_ae_ld; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_s2_xcpt_ae_st; // @[HellaCache.scala:292:25] wire [39:0] _dcacheArb_io_requestor_0_s2_gpa; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_ordered; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_store_pending; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_perf_acquire; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_perf_release; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_perf_grant; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_perf_tlbMiss; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_perf_blocked; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_perf_canAcceptStoreThenLoad; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_perf_canAcceptStoreThenRMW; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_perf_canAcceptLoadThenLoad; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_perf_storeBufferEmptyAfterLoad; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_0_perf_storeBufferEmptyAfterStore; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_req_ready; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_s2_nack; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_s2_nack_cause_raw; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_s2_uncached; // @[HellaCache.scala:292:25] wire [31:0] _dcacheArb_io_requestor_1_s2_paddr; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_resp_valid; // @[HellaCache.scala:292:25] wire [39:0] _dcacheArb_io_requestor_1_resp_bits_addr; // @[HellaCache.scala:292:25] wire [6:0] _dcacheArb_io_requestor_1_resp_bits_tag; // @[HellaCache.scala:292:25] wire [4:0] _dcacheArb_io_requestor_1_resp_bits_cmd; // @[HellaCache.scala:292:25] wire [1:0] _dcacheArb_io_requestor_1_resp_bits_size; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_resp_bits_signed; // @[HellaCache.scala:292:25] wire [1:0] _dcacheArb_io_requestor_1_resp_bits_dprv; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_resp_bits_dv; // @[HellaCache.scala:292:25] wire [63:0] _dcacheArb_io_requestor_1_resp_bits_data; // @[HellaCache.scala:292:25] wire [7:0] _dcacheArb_io_requestor_1_resp_bits_mask; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_resp_bits_replay; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_resp_bits_has_data; // @[HellaCache.scala:292:25] 
wire [63:0] _dcacheArb_io_requestor_1_resp_bits_data_word_bypass; // @[HellaCache.scala:292:25] wire [63:0] _dcacheArb_io_requestor_1_resp_bits_data_raw; // @[HellaCache.scala:292:25] wire [63:0] _dcacheArb_io_requestor_1_resp_bits_store_data; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_replay_next; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_s2_xcpt_ma_ld; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_s2_xcpt_ma_st; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_s2_xcpt_pf_ld; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_s2_xcpt_pf_st; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_s2_xcpt_ae_ld; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_s2_xcpt_ae_st; // @[HellaCache.scala:292:25] wire [39:0] _dcacheArb_io_requestor_1_s2_gpa; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_ordered; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_store_pending; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_perf_acquire; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_perf_release; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_perf_grant; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_perf_tlbMiss; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_perf_blocked; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_perf_canAcceptStoreThenLoad; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_perf_canAcceptStoreThenRMW; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_perf_canAcceptLoadThenLoad; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_perf_storeBufferEmptyAfterLoad; // @[HellaCache.scala:292:25] wire _dcacheArb_io_requestor_1_perf_storeBufferEmptyAfterStore; // @[HellaCache.scala:292:25] wire _dcacheArb_io_mem_req_valid; // @[HellaCache.scala:292:25] wire [39:0] _dcacheArb_io_mem_req_bits_addr; // @[HellaCache.scala:292:25] wire [6:0] _dcacheArb_io_mem_req_bits_tag; // @[HellaCache.scala:292:25] wire [4:0] _dcacheArb_io_mem_req_bits_cmd; // @[HellaCache.scala:292:25] wire [1:0] _dcacheArb_io_mem_req_bits_size; // @[HellaCache.scala:292:25] wire _dcacheArb_io_mem_req_bits_signed; // @[HellaCache.scala:292:25] wire [1:0] _dcacheArb_io_mem_req_bits_dprv; // @[HellaCache.scala:292:25] wire _dcacheArb_io_mem_req_bits_dv; // @[HellaCache.scala:292:25] wire _dcacheArb_io_mem_req_bits_phys; // @[HellaCache.scala:292:25] wire _dcacheArb_io_mem_req_bits_no_resp; // @[HellaCache.scala:292:25] wire _dcacheArb_io_mem_s1_kill; // @[HellaCache.scala:292:25] wire [63:0] _dcacheArb_io_mem_s1_data_data; // @[HellaCache.scala:292:25] wire _dcacheArb_io_mem_keep_clock_enabled; // @[HellaCache.scala:292:25] wire _fpuOpt_io_fcsr_flags_valid; // @[RocketTile.scala:242:62] wire [4:0] _fpuOpt_io_fcsr_flags_bits; // @[RocketTile.scala:242:62] wire [63:0] _fpuOpt_io_store_data; // @[RocketTile.scala:242:62] wire [63:0] _fpuOpt_io_toint_data; // @[RocketTile.scala:242:62] wire _fpuOpt_io_fcsr_rdy; // @[RocketTile.scala:242:62] wire _fpuOpt_io_nack_mem; // @[RocketTile.scala:242:62] wire _fpuOpt_io_illegal_rm; // @[RocketTile.scala:242:62] wire _fpuOpt_io_dec_ldst; // @[RocketTile.scala:242:62] wire _fpuOpt_io_dec_wen; // @[RocketTile.scala:242:62] wire _fpuOpt_io_dec_ren1; // @[RocketTile.scala:242:62] wire _fpuOpt_io_dec_ren2; // @[RocketTile.scala:242:62] wire _fpuOpt_io_dec_ren3; // @[RocketTile.scala:242:62] wire _fpuOpt_io_dec_swap12; // @[RocketTile.scala:242:62] wire 
_fpuOpt_io_dec_swap23; // @[RocketTile.scala:242:62] wire [1:0] _fpuOpt_io_dec_typeTagIn; // @[RocketTile.scala:242:62] wire [1:0] _fpuOpt_io_dec_typeTagOut; // @[RocketTile.scala:242:62] wire _fpuOpt_io_dec_fromint; // @[RocketTile.scala:242:62] wire _fpuOpt_io_dec_toint; // @[RocketTile.scala:242:62] wire _fpuOpt_io_dec_fastpipe; // @[RocketTile.scala:242:62] wire _fpuOpt_io_dec_fma; // @[RocketTile.scala:242:62] wire _fpuOpt_io_dec_div; // @[RocketTile.scala:242:62] wire _fpuOpt_io_dec_sqrt; // @[RocketTile.scala:242:62] wire _fpuOpt_io_dec_wflags; // @[RocketTile.scala:242:62] wire _fpuOpt_io_dec_vec; // @[RocketTile.scala:242:62] wire _fpuOpt_io_sboard_set; // @[RocketTile.scala:242:62] wire _fpuOpt_io_sboard_clr; // @[RocketTile.scala:242:62] wire [4:0] _fpuOpt_io_sboard_clra; // @[RocketTile.scala:242:62] wire _frontend_io_cpu_resp_valid; // @[Frontend.scala:393:28] wire [1:0] _frontend_io_cpu_resp_bits_btb_cfiType; // @[Frontend.scala:393:28] wire _frontend_io_cpu_resp_bits_btb_taken; // @[Frontend.scala:393:28] wire [1:0] _frontend_io_cpu_resp_bits_btb_mask; // @[Frontend.scala:393:28] wire _frontend_io_cpu_resp_bits_btb_bridx; // @[Frontend.scala:393:28] wire [38:0] _frontend_io_cpu_resp_bits_btb_target; // @[Frontend.scala:393:28] wire [4:0] _frontend_io_cpu_resp_bits_btb_entry; // @[Frontend.scala:393:28] wire [7:0] _frontend_io_cpu_resp_bits_btb_bht_history; // @[Frontend.scala:393:28] wire _frontend_io_cpu_resp_bits_btb_bht_value; // @[Frontend.scala:393:28] wire [39:0] _frontend_io_cpu_resp_bits_pc; // @[Frontend.scala:393:28] wire [31:0] _frontend_io_cpu_resp_bits_data; // @[Frontend.scala:393:28] wire [1:0] _frontend_io_cpu_resp_bits_mask; // @[Frontend.scala:393:28] wire _frontend_io_cpu_resp_bits_xcpt_pf_inst; // @[Frontend.scala:393:28] wire _frontend_io_cpu_resp_bits_xcpt_gf_inst; // @[Frontend.scala:393:28] wire _frontend_io_cpu_resp_bits_xcpt_ae_inst; // @[Frontend.scala:393:28] wire _frontend_io_cpu_resp_bits_replay; // @[Frontend.scala:393:28] wire _frontend_io_cpu_gpa_valid; // @[Frontend.scala:393:28] wire [39:0] _frontend_io_cpu_gpa_bits; // @[Frontend.scala:393:28] wire _frontend_io_cpu_gpa_is_pte; // @[Frontend.scala:393:28] wire [39:0] _frontend_io_cpu_npc; // @[Frontend.scala:393:28] wire _frontend_io_cpu_perf_acquire; // @[Frontend.scala:393:28] wire _frontend_io_cpu_perf_tlbMiss; // @[Frontend.scala:393:28] wire _frontend_io_ptw_req_valid; // @[Frontend.scala:393:28] wire _frontend_io_ptw_req_bits_valid; // @[Frontend.scala:393:28] wire [26:0] _frontend_io_ptw_req_bits_bits_addr; // @[Frontend.scala:393:28] wire _frontend_io_ptw_req_bits_bits_need_gpa; // @[Frontend.scala:393:28] wire _dcache_io_cpu_req_ready; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_s2_nack; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_s2_nack_cause_raw; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_s2_uncached; // @[HellaCache.scala:278:43] wire [31:0] _dcache_io_cpu_s2_paddr; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_resp_valid; // @[HellaCache.scala:278:43] wire [39:0] _dcache_io_cpu_resp_bits_addr; // @[HellaCache.scala:278:43] wire [6:0] _dcache_io_cpu_resp_bits_tag; // @[HellaCache.scala:278:43] wire [4:0] _dcache_io_cpu_resp_bits_cmd; // @[HellaCache.scala:278:43] wire [1:0] _dcache_io_cpu_resp_bits_size; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_resp_bits_signed; // @[HellaCache.scala:278:43] wire [1:0] _dcache_io_cpu_resp_bits_dprv; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_resp_bits_dv; // @[HellaCache.scala:278:43] wire [63:0] 
_dcache_io_cpu_resp_bits_data; // @[HellaCache.scala:278:43] wire [7:0] _dcache_io_cpu_resp_bits_mask; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_resp_bits_replay; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_resp_bits_has_data; // @[HellaCache.scala:278:43] wire [63:0] _dcache_io_cpu_resp_bits_data_word_bypass; // @[HellaCache.scala:278:43] wire [63:0] _dcache_io_cpu_resp_bits_data_raw; // @[HellaCache.scala:278:43] wire [63:0] _dcache_io_cpu_resp_bits_store_data; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_replay_next; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_s2_xcpt_ma_ld; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_s2_xcpt_ma_st; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_s2_xcpt_pf_ld; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_s2_xcpt_pf_st; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_s2_xcpt_ae_ld; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_s2_xcpt_ae_st; // @[HellaCache.scala:278:43] wire [39:0] _dcache_io_cpu_s2_gpa; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_ordered; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_store_pending; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_perf_acquire; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_perf_release; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_perf_grant; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_perf_tlbMiss; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_perf_blocked; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_perf_canAcceptStoreThenLoad; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_perf_canAcceptStoreThenRMW; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_perf_canAcceptLoadThenLoad; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_perf_storeBufferEmptyAfterLoad; // @[HellaCache.scala:278:43] wire _dcache_io_cpu_perf_storeBufferEmptyAfterStore; // @[HellaCache.scala:278:43] wire _dcache_io_ptw_req_valid; // @[HellaCache.scala:278:43] wire [26:0] _dcache_io_ptw_req_bits_bits_addr; // @[HellaCache.scala:278:43] wire _dcache_io_ptw_req_bits_bits_need_gpa; // @[HellaCache.scala:278:43] wire auto_buffer_out_a_ready_0 = auto_buffer_out_a_ready; // @[RocketTile.scala:141:7] wire auto_buffer_out_b_valid_0 = auto_buffer_out_b_valid; // @[RocketTile.scala:141:7] wire [2:0] auto_buffer_out_b_bits_opcode_0 = auto_buffer_out_b_bits_opcode; // @[RocketTile.scala:141:7] wire [1:0] auto_buffer_out_b_bits_param_0 = auto_buffer_out_b_bits_param; // @[RocketTile.scala:141:7] wire [3:0] auto_buffer_out_b_bits_size_0 = auto_buffer_out_b_bits_size; // @[RocketTile.scala:141:7] wire [1:0] auto_buffer_out_b_bits_source_0 = auto_buffer_out_b_bits_source; // @[RocketTile.scala:141:7] wire [31:0] auto_buffer_out_b_bits_address_0 = auto_buffer_out_b_bits_address; // @[RocketTile.scala:141:7] wire [7:0] auto_buffer_out_b_bits_mask_0 = auto_buffer_out_b_bits_mask; // @[RocketTile.scala:141:7] wire [63:0] auto_buffer_out_b_bits_data_0 = auto_buffer_out_b_bits_data; // @[RocketTile.scala:141:7] wire auto_buffer_out_b_bits_corrupt_0 = auto_buffer_out_b_bits_corrupt; // @[RocketTile.scala:141:7] wire auto_buffer_out_c_ready_0 = auto_buffer_out_c_ready; // @[RocketTile.scala:141:7] wire auto_buffer_out_d_valid_0 = auto_buffer_out_d_valid; // @[RocketTile.scala:141:7] wire [2:0] auto_buffer_out_d_bits_opcode_0 = auto_buffer_out_d_bits_opcode; // @[RocketTile.scala:141:7] wire [1:0] auto_buffer_out_d_bits_param_0 = auto_buffer_out_d_bits_param; // @[RocketTile.scala:141:7] wire [3:0] auto_buffer_out_d_bits_size_0 = auto_buffer_out_d_bits_size; // 
@[RocketTile.scala:141:7] wire [1:0] auto_buffer_out_d_bits_source_0 = auto_buffer_out_d_bits_source; // @[RocketTile.scala:141:7] wire [2:0] auto_buffer_out_d_bits_sink_0 = auto_buffer_out_d_bits_sink; // @[RocketTile.scala:141:7] wire auto_buffer_out_d_bits_denied_0 = auto_buffer_out_d_bits_denied; // @[RocketTile.scala:141:7] wire [63:0] auto_buffer_out_d_bits_data_0 = auto_buffer_out_d_bits_data; // @[RocketTile.scala:141:7] wire auto_buffer_out_d_bits_corrupt_0 = auto_buffer_out_d_bits_corrupt; // @[RocketTile.scala:141:7] wire auto_buffer_out_e_ready_0 = auto_buffer_out_e_ready; // @[RocketTile.scala:141:7] wire auto_int_local_in_3_0_0 = auto_int_local_in_3_0; // @[RocketTile.scala:141:7] wire auto_int_local_in_2_0_0 = auto_int_local_in_2_0; // @[RocketTile.scala:141:7] wire auto_int_local_in_1_0_0 = auto_int_local_in_1_0; // @[RocketTile.scala:141:7] wire auto_int_local_in_1_1_0 = auto_int_local_in_1_1; // @[RocketTile.scala:141:7] wire auto_int_local_in_0_0_0 = auto_int_local_in_0_0; // @[RocketTile.scala:141:7] wire [2:0] auto_hartid_in_0 = auto_hartid_in; // @[RocketTile.scala:141:7] wire auto_buffer_out_a_bits_corrupt = 1'h0; // @[RocketTile.scala:141:7] wire auto_buffer_out_c_bits_corrupt = 1'h0; // @[RocketTile.scala:141:7] wire auto_cease_out_0 = 1'h0; // @[RocketTile.scala:141:7] wire auto_halt_out_0 = 1'h0; // @[RocketTile.scala:141:7] wire auto_trace_core_source_out_group_0_iretire = 1'h0; // @[RocketTile.scala:141:7] wire auto_trace_core_source_out_group_0_ilastsize = 1'h0; // @[RocketTile.scala:141:7] wire broadcast_childClock = 1'h0; // @[LazyModuleImp.scala:155:31] wire broadcast_childReset = 1'h0; // @[LazyModuleImp.scala:158:31] wire broadcast__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25] wire broadcast_1_childClock = 1'h0; // @[LazyModuleImp.scala:155:31] wire broadcast_1_childReset = 1'h0; // @[LazyModuleImp.scala:158:31] wire broadcast_1__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25] wire nexus_childClock = 1'h0; // @[LazyModuleImp.scala:155:31] wire nexus_childReset = 1'h0; // @[LazyModuleImp.scala:158:31] wire nexus__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25] wire nexus_1_childClock = 1'h0; // @[LazyModuleImp.scala:155:31] wire nexus_1_childReset = 1'h0; // @[LazyModuleImp.scala:158:31] wire nexus_1__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25] wire nexus_1_x1_bundleOut_x_sourceOpt_enable = 1'h0; // @[BaseTile.scala:305:19] wire nexus_1_x1_bundleOut_x_sourceOpt_stall = 1'h0; // @[BaseTile.scala:305:19] wire nexus_1_nodeOut_enable = 1'h0; // @[MixedNode.scala:542:17] wire nexus_1_nodeOut_stall = 1'h0; // @[MixedNode.scala:542:17] wire nexus_1_defaultWireOpt_enable = 1'h0; // @[BaseTile.scala:305:19] wire nexus_1_defaultWireOpt_stall = 1'h0; // @[BaseTile.scala:305:19] wire broadcast_2_auto_in_0_rvalid_0 = 1'h0; // @[BundleBridgeNexus.scala:20:9] wire broadcast_2_auto_in_0_wvalid_0 = 1'h0; // @[BundleBridgeNexus.scala:20:9] wire broadcast_2_auto_in_0_ivalid_0 = 1'h0; // @[BundleBridgeNexus.scala:20:9] wire broadcast_2_childClock = 1'h0; // @[LazyModuleImp.scala:155:31] wire broadcast_2_childReset = 1'h0; // @[LazyModuleImp.scala:158:31] wire broadcast_2__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25] wire broadcast_2_nodeIn_0_rvalid_0 = 1'h0; // @[MixedNode.scala:551:17] wire broadcast_2_nodeIn_0_wvalid_0 = 1'h0; // @[MixedNode.scala:551:17] wire broadcast_2_nodeIn_0_ivalid_0 = 1'h0; // @[MixedNode.scala:551:17] wire widget_auto_anon_in_a_bits_corrupt = 1'h0; // @[WidthWidget.scala:27:9] wire 
widget_auto_anon_in_c_bits_corrupt = 1'h0; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_a_bits_corrupt = 1'h0; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_c_bits_corrupt = 1'h0; // @[WidthWidget.scala:27:9] wire widget_anonOut_a_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17] wire widget_anonOut_c_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17] wire widget_anonIn_a_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17] wire widget_anonIn_c_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17] wire widget_1_auto_anon_in_a_bits_source = 1'h0; // @[WidthWidget.scala:27:9] wire widget_1_auto_anon_in_a_bits_corrupt = 1'h0; // @[WidthWidget.scala:27:9] wire widget_1_auto_anon_in_d_bits_source = 1'h0; // @[WidthWidget.scala:27:9] wire widget_1_auto_anon_out_a_bits_source = 1'h0; // @[WidthWidget.scala:27:9] wire widget_1_auto_anon_out_a_bits_corrupt = 1'h0; // @[WidthWidget.scala:27:9] wire widget_1_auto_anon_out_d_bits_source = 1'h0; // @[WidthWidget.scala:27:9] wire widget_1_anonOut_a_bits_source = 1'h0; // @[MixedNode.scala:542:17] wire widget_1_anonOut_a_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17] wire widget_1_anonOut_d_bits_source = 1'h0; // @[MixedNode.scala:542:17] wire widget_1_anonIn_a_bits_source = 1'h0; // @[MixedNode.scala:551:17] wire widget_1_anonIn_a_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17] wire widget_1_anonIn_d_bits_source = 1'h0; // @[MixedNode.scala:551:17] wire buffer_auto_in_a_bits_corrupt = 1'h0; // @[Buffer.scala:40:9] wire buffer_auto_in_c_bits_corrupt = 1'h0; // @[Buffer.scala:40:9] wire buffer_auto_out_a_bits_corrupt = 1'h0; // @[Buffer.scala:40:9] wire buffer_auto_out_c_bits_corrupt = 1'h0; // @[Buffer.scala:40:9] wire buffer_nodeOut_a_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17] wire buffer_nodeOut_c_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17] wire buffer_nodeIn_a_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17] wire buffer_nodeIn_c_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17] wire tlOtherMastersNodeOut_a_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17] wire tlOtherMastersNodeOut_c_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17] wire tlOtherMastersNodeIn_a_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17] wire tlOtherMastersNodeIn_c_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17] wire traceCoreSourceNodeOut_group_0_iretire = 1'h0; // @[MixedNode.scala:542:17] wire traceCoreSourceNodeOut_group_0_ilastsize = 1'h0; // @[MixedNode.scala:542:17] wire bundleIn_x_sourceOpt_enable = 1'h0; // @[BaseTile.scala:305:19] wire bundleIn_x_sourceOpt_stall = 1'h0; // @[BaseTile.scala:305:19] wire traceAuxSinkNodeIn_enable = 1'h0; // @[MixedNode.scala:551:17] wire traceAuxSinkNodeIn_stall = 1'h0; // @[MixedNode.scala:551:17] wire bpwatchSourceNodeOut_0_rvalid_0 = 1'h0; // @[MixedNode.scala:542:17] wire bpwatchSourceNodeOut_0_wvalid_0 = 1'h0; // @[MixedNode.scala:542:17] wire bpwatchSourceNodeOut_0_ivalid_0 = 1'h0; // @[MixedNode.scala:542:17] wire haltNodeOut_0 = 1'h0; // @[MixedNode.scala:542:17] wire ceaseNodeOut_0 = 1'h0; // @[MixedNode.scala:542:17] wire [2:0] widget_1_auto_anon_in_a_bits_opcode = 3'h4; // @[WidthWidget.scala:27:9] wire [2:0] widget_1_auto_anon_out_a_bits_opcode = 3'h4; // @[WidthWidget.scala:27:9] wire [2:0] widget_1_anonOut_a_bits_opcode = 3'h4; // @[WidthWidget.scala:27:9] wire [2:0] widget_1_anonIn_a_bits_opcode = 3'h4; // @[WidthWidget.scala:27:9] wire [3:0] widget_1_auto_anon_in_a_bits_size = 4'h6; // @[WidthWidget.scala:27:9] wire [3:0] widget_1_auto_anon_out_a_bits_size = 4'h6; // 
@[WidthWidget.scala:27:9] wire [3:0] widget_1_anonOut_a_bits_size = 4'h6; // @[WidthWidget.scala:27:9] wire [3:0] widget_1_anonIn_a_bits_size = 4'h6; // @[WidthWidget.scala:27:9] wire [7:0] widget_1_auto_anon_in_a_bits_mask = 8'hFF; // @[WidthWidget.scala:27:9] wire [7:0] widget_1_auto_anon_out_a_bits_mask = 8'hFF; // @[WidthWidget.scala:27:9] wire [7:0] widget_1_anonOut_a_bits_mask = 8'hFF; // @[WidthWidget.scala:27:9] wire [7:0] widget_1_anonIn_a_bits_mask = 8'hFF; // @[WidthWidget.scala:27:9] wire [2:0] widget_1_auto_anon_in_a_bits_param = 3'h0; // @[WidthWidget.scala:27:9] wire [2:0] widget_1_auto_anon_out_a_bits_param = 3'h0; // @[WidthWidget.scala:27:9] wire [2:0] widget_1_anonOut_a_bits_param = 3'h0; // @[WidthWidget.scala:27:9] wire [2:0] widget_1_anonIn_a_bits_param = 3'h0; // @[WidthWidget.scala:27:9] wire [63:0] widget_1_auto_anon_in_a_bits_data = 64'h0; // @[WidthWidget.scala:27:9] wire [63:0] widget_1_auto_anon_out_a_bits_data = 64'h0; // @[WidthWidget.scala:27:9] wire [63:0] widget_1_anonOut_a_bits_data = 64'h0; // @[WidthWidget.scala:27:9] wire [63:0] widget_1_anonIn_a_bits_data = 64'h0; // @[WidthWidget.scala:27:9] wire [31:0] auto_reset_vector_in = 32'h10000; // @[RocketTile.scala:141:7] wire [31:0] broadcast_1_auto_in = 32'h10000; // @[RocketTile.scala:141:7] wire [31:0] broadcast_1_auto_out_1 = 32'h10000; // @[RocketTile.scala:141:7] wire [31:0] broadcast_1_auto_out_0 = 32'h10000; // @[RocketTile.scala:141:7] wire [31:0] broadcast_1_nodeIn = 32'h10000; // @[RocketTile.scala:141:7] wire [31:0] broadcast_1_nodeOut = 32'h10000; // @[RocketTile.scala:141:7] wire [31:0] broadcast_1_x1_nodeOut = 32'h10000; // @[RocketTile.scala:141:7] wire [31:0] resetVectorSinkNodeIn = 32'h10000; // @[RocketTile.scala:141:7] wire [31:0] reset_vectorOut = 32'h10000; // @[RocketTile.scala:141:7] wire [31:0] reset_vectorIn = 32'h10000; // @[RocketTile.scala:141:7] wire [3:0] auto_trace_core_source_out_group_0_itype = 4'h0; // @[RocketTile.scala:141:7] wire [3:0] auto_trace_core_source_out_priv = 4'h0; // @[RocketTile.scala:141:7] wire [3:0] traceCoreSourceNodeOut_group_0_itype = 4'h0; // @[MixedNode.scala:542:17] wire [3:0] traceCoreSourceNodeOut_priv = 4'h0; // @[MixedNode.scala:542:17] wire [31:0] auto_trace_core_source_out_group_0_iaddr = 32'h0; // @[RocketTile.scala:141:7] wire [31:0] auto_trace_core_source_out_tval = 32'h0; // @[RocketTile.scala:141:7] wire [31:0] auto_trace_core_source_out_cause = 32'h0; // @[RocketTile.scala:141:7] wire [31:0] traceCoreSourceNodeOut_group_0_iaddr = 32'h0; // @[MixedNode.scala:542:17] wire [31:0] traceCoreSourceNodeOut_tval = 32'h0; // @[MixedNode.scala:542:17] wire [31:0] traceCoreSourceNodeOut_cause = 32'h0; // @[MixedNode.scala:542:17] wire widget_1_auto_anon_in_d_ready = 1'h1; // @[WidthWidget.scala:27:9] wire widget_1_auto_anon_out_d_ready = 1'h1; // @[WidthWidget.scala:27:9] wire widget_1_anonOut_d_ready = 1'h1; // @[WidthWidget.scala:27:9] wire widget_1_anonIn_d_ready = 1'h1; // @[WidthWidget.scala:27:9] wire buffer_auto_out_a_ready = auto_buffer_out_a_ready_0; // @[Buffer.scala:40:9] wire buffer_auto_out_a_valid; // @[Buffer.scala:40:9] wire [2:0] buffer_auto_out_a_bits_opcode; // @[Buffer.scala:40:9] wire [2:0] buffer_auto_out_a_bits_param; // @[Buffer.scala:40:9] wire [3:0] buffer_auto_out_a_bits_size; // @[Buffer.scala:40:9] wire [1:0] buffer_auto_out_a_bits_source; // @[Buffer.scala:40:9] wire [31:0] buffer_auto_out_a_bits_address; // @[Buffer.scala:40:9] wire [7:0] buffer_auto_out_a_bits_mask; // @[Buffer.scala:40:9] wire [63:0] 
buffer_auto_out_a_bits_data; // @[Buffer.scala:40:9] wire buffer_auto_out_b_ready; // @[Buffer.scala:40:9] wire buffer_auto_out_b_valid = auto_buffer_out_b_valid_0; // @[Buffer.scala:40:9] wire [2:0] buffer_auto_out_b_bits_opcode = auto_buffer_out_b_bits_opcode_0; // @[Buffer.scala:40:9] wire [1:0] buffer_auto_out_b_bits_param = auto_buffer_out_b_bits_param_0; // @[Buffer.scala:40:9] wire [3:0] buffer_auto_out_b_bits_size = auto_buffer_out_b_bits_size_0; // @[Buffer.scala:40:9] wire [1:0] buffer_auto_out_b_bits_source = auto_buffer_out_b_bits_source_0; // @[Buffer.scala:40:9] wire [31:0] buffer_auto_out_b_bits_address = auto_buffer_out_b_bits_address_0; // @[Buffer.scala:40:9] wire [7:0] buffer_auto_out_b_bits_mask = auto_buffer_out_b_bits_mask_0; // @[Buffer.scala:40:9] wire [63:0] buffer_auto_out_b_bits_data = auto_buffer_out_b_bits_data_0; // @[Buffer.scala:40:9] wire buffer_auto_out_b_bits_corrupt = auto_buffer_out_b_bits_corrupt_0; // @[Buffer.scala:40:9] wire buffer_auto_out_c_ready = auto_buffer_out_c_ready_0; // @[Buffer.scala:40:9] wire buffer_auto_out_c_valid; // @[Buffer.scala:40:9] wire [2:0] buffer_auto_out_c_bits_opcode; // @[Buffer.scala:40:9] wire [2:0] buffer_auto_out_c_bits_param; // @[Buffer.scala:40:9] wire [3:0] buffer_auto_out_c_bits_size; // @[Buffer.scala:40:9] wire [1:0] buffer_auto_out_c_bits_source; // @[Buffer.scala:40:9] wire [31:0] buffer_auto_out_c_bits_address; // @[Buffer.scala:40:9] wire [63:0] buffer_auto_out_c_bits_data; // @[Buffer.scala:40:9] wire buffer_auto_out_d_ready; // @[Buffer.scala:40:9] wire buffer_auto_out_d_valid = auto_buffer_out_d_valid_0; // @[Buffer.scala:40:9] wire [2:0] buffer_auto_out_d_bits_opcode = auto_buffer_out_d_bits_opcode_0; // @[Buffer.scala:40:9] wire [1:0] buffer_auto_out_d_bits_param = auto_buffer_out_d_bits_param_0; // @[Buffer.scala:40:9] wire [3:0] buffer_auto_out_d_bits_size = auto_buffer_out_d_bits_size_0; // @[Buffer.scala:40:9] wire [1:0] buffer_auto_out_d_bits_source = auto_buffer_out_d_bits_source_0; // @[Buffer.scala:40:9] wire [2:0] buffer_auto_out_d_bits_sink = auto_buffer_out_d_bits_sink_0; // @[Buffer.scala:40:9] wire buffer_auto_out_d_bits_denied = auto_buffer_out_d_bits_denied_0; // @[Buffer.scala:40:9] wire [63:0] buffer_auto_out_d_bits_data = auto_buffer_out_d_bits_data_0; // @[Buffer.scala:40:9] wire buffer_auto_out_d_bits_corrupt = auto_buffer_out_d_bits_corrupt_0; // @[Buffer.scala:40:9] wire buffer_auto_out_e_ready = auto_buffer_out_e_ready_0; // @[Buffer.scala:40:9] wire buffer_auto_out_e_valid; // @[Buffer.scala:40:9] wire [2:0] buffer_auto_out_e_bits_sink; // @[Buffer.scala:40:9] wire wfiNodeOut_0; // @[MixedNode.scala:542:17] wire x1_int_localIn_2_0 = auto_int_local_in_3_0_0; // @[RocketTile.scala:141:7] wire x1_int_localIn_1_0 = auto_int_local_in_2_0_0; // @[RocketTile.scala:141:7] wire x1_int_localIn_0 = auto_int_local_in_1_0_0; // @[RocketTile.scala:141:7] wire x1_int_localIn_1 = auto_int_local_in_1_1_0; // @[RocketTile.scala:141:7] wire int_localIn_0 = auto_int_local_in_0_0_0; // @[RocketTile.scala:141:7] wire traceSourceNodeOut_insns_0_valid; // @[MixedNode.scala:542:17] wire [39:0] traceSourceNodeOut_insns_0_iaddr; // @[MixedNode.scala:542:17] wire [31:0] traceSourceNodeOut_insns_0_insn; // @[MixedNode.scala:542:17] wire [2:0] traceSourceNodeOut_insns_0_priv; // @[MixedNode.scala:542:17] wire traceSourceNodeOut_insns_0_exception; // @[MixedNode.scala:542:17] wire traceSourceNodeOut_insns_0_interrupt; // @[MixedNode.scala:542:17] wire [63:0] traceSourceNodeOut_insns_0_cause; // 
@[MixedNode.scala:542:17] wire [39:0] traceSourceNodeOut_insns_0_tval; // @[MixedNode.scala:542:17] wire [63:0] traceSourceNodeOut_time; // @[MixedNode.scala:542:17] wire [2:0] hartidIn = auto_hartid_in_0; // @[RocketTile.scala:141:7] wire [2:0] auto_buffer_out_a_bits_opcode_0; // @[RocketTile.scala:141:7] wire [2:0] auto_buffer_out_a_bits_param_0; // @[RocketTile.scala:141:7] wire [3:0] auto_buffer_out_a_bits_size_0; // @[RocketTile.scala:141:7] wire [1:0] auto_buffer_out_a_bits_source_0; // @[RocketTile.scala:141:7] wire [31:0] auto_buffer_out_a_bits_address_0; // @[RocketTile.scala:141:7] wire [7:0] auto_buffer_out_a_bits_mask_0; // @[RocketTile.scala:141:7] wire [63:0] auto_buffer_out_a_bits_data_0; // @[RocketTile.scala:141:7] wire auto_buffer_out_a_valid_0; // @[RocketTile.scala:141:7] wire auto_buffer_out_b_ready_0; // @[RocketTile.scala:141:7] wire [2:0] auto_buffer_out_c_bits_opcode_0; // @[RocketTile.scala:141:7] wire [2:0] auto_buffer_out_c_bits_param_0; // @[RocketTile.scala:141:7] wire [3:0] auto_buffer_out_c_bits_size_0; // @[RocketTile.scala:141:7] wire [1:0] auto_buffer_out_c_bits_source_0; // @[RocketTile.scala:141:7] wire [31:0] auto_buffer_out_c_bits_address_0; // @[RocketTile.scala:141:7] wire [63:0] auto_buffer_out_c_bits_data_0; // @[RocketTile.scala:141:7] wire auto_buffer_out_c_valid_0; // @[RocketTile.scala:141:7] wire auto_buffer_out_d_ready_0; // @[RocketTile.scala:141:7] wire [2:0] auto_buffer_out_e_bits_sink_0; // @[RocketTile.scala:141:7] wire auto_buffer_out_e_valid_0; // @[RocketTile.scala:141:7] wire auto_wfi_out_0_0; // @[RocketTile.scala:141:7] wire auto_trace_source_out_insns_0_valid_0; // @[RocketTile.scala:141:7] wire [39:0] auto_trace_source_out_insns_0_iaddr_0; // @[RocketTile.scala:141:7] wire [31:0] auto_trace_source_out_insns_0_insn_0; // @[RocketTile.scala:141:7] wire [2:0] auto_trace_source_out_insns_0_priv_0; // @[RocketTile.scala:141:7] wire auto_trace_source_out_insns_0_exception_0; // @[RocketTile.scala:141:7] wire auto_trace_source_out_insns_0_interrupt_0; // @[RocketTile.scala:141:7] wire [63:0] auto_trace_source_out_insns_0_cause_0; // @[RocketTile.scala:141:7] wire [39:0] auto_trace_source_out_insns_0_tval_0; // @[RocketTile.scala:141:7] wire [63:0] auto_trace_source_out_time_0; // @[RocketTile.scala:141:7] wire [2:0] hartidOut; // @[MixedNode.scala:542:17] wire [2:0] broadcast_nodeIn = broadcast_auto_in; // @[MixedNode.scala:551:17] wire [2:0] broadcast_nodeOut; // @[MixedNode.scala:542:17] wire [2:0] broadcast_auto_out; // @[BundleBridgeNexus.scala:20:9] wire [2:0] hartIdSinkNodeIn = broadcast_auto_out; // @[MixedNode.scala:551:17] assign broadcast_nodeOut = broadcast_nodeIn; // @[MixedNode.scala:542:17, :551:17] assign broadcast_auto_out = broadcast_nodeOut; // @[MixedNode.scala:542:17] wire bpwatchSourceNodeOut_0_valid_0; // @[MixedNode.scala:542:17] wire broadcast_2_nodeIn_0_valid_0 = broadcast_2_auto_in_0_valid_0; // @[MixedNode.scala:551:17] wire [2:0] bpwatchSourceNodeOut_0_action; // @[MixedNode.scala:542:17] wire [2:0] broadcast_2_nodeIn_0_action = broadcast_2_auto_in_0_action; // @[MixedNode.scala:551:17] wire widget_anonIn_a_ready; // @[MixedNode.scala:551:17] wire widget_anonIn_a_valid = widget_auto_anon_in_a_valid; // @[WidthWidget.scala:27:9] wire [2:0] widget_anonIn_a_bits_opcode = widget_auto_anon_in_a_bits_opcode; // @[WidthWidget.scala:27:9] wire [2:0] widget_anonIn_a_bits_param = widget_auto_anon_in_a_bits_param; // @[WidthWidget.scala:27:9] wire [3:0] widget_anonIn_a_bits_size = widget_auto_anon_in_a_bits_size; // 
@[WidthWidget.scala:27:9] wire widget_anonIn_a_bits_source = widget_auto_anon_in_a_bits_source; // @[WidthWidget.scala:27:9] wire [31:0] widget_anonIn_a_bits_address = widget_auto_anon_in_a_bits_address; // @[WidthWidget.scala:27:9] wire [7:0] widget_anonIn_a_bits_mask = widget_auto_anon_in_a_bits_mask; // @[WidthWidget.scala:27:9] wire [63:0] widget_anonIn_a_bits_data = widget_auto_anon_in_a_bits_data; // @[WidthWidget.scala:27:9] wire widget_anonIn_b_ready = widget_auto_anon_in_b_ready; // @[WidthWidget.scala:27:9] wire widget_anonIn_b_valid; // @[MixedNode.scala:551:17] wire [2:0] widget_anonIn_b_bits_opcode; // @[MixedNode.scala:551:17] wire [1:0] widget_anonIn_b_bits_param; // @[MixedNode.scala:551:17] wire [3:0] widget_anonIn_b_bits_size; // @[MixedNode.scala:551:17] wire widget_anonIn_b_bits_source; // @[MixedNode.scala:551:17] wire [31:0] widget_anonIn_b_bits_address; // @[MixedNode.scala:551:17] wire [7:0] widget_anonIn_b_bits_mask; // @[MixedNode.scala:551:17] wire [63:0] widget_anonIn_b_bits_data; // @[MixedNode.scala:551:17] wire widget_anonIn_b_bits_corrupt; // @[MixedNode.scala:551:17] wire widget_anonIn_c_ready; // @[MixedNode.scala:551:17] wire widget_anonIn_c_valid = widget_auto_anon_in_c_valid; // @[WidthWidget.scala:27:9] wire [2:0] widget_anonIn_c_bits_opcode = widget_auto_anon_in_c_bits_opcode; // @[WidthWidget.scala:27:9] wire [2:0] widget_anonIn_c_bits_param = widget_auto_anon_in_c_bits_param; // @[WidthWidget.scala:27:9] wire [3:0] widget_anonIn_c_bits_size = widget_auto_anon_in_c_bits_size; // @[WidthWidget.scala:27:9] wire widget_anonIn_c_bits_source = widget_auto_anon_in_c_bits_source; // @[WidthWidget.scala:27:9] wire [31:0] widget_anonIn_c_bits_address = widget_auto_anon_in_c_bits_address; // @[WidthWidget.scala:27:9] wire [63:0] widget_anonIn_c_bits_data = widget_auto_anon_in_c_bits_data; // @[WidthWidget.scala:27:9] wire widget_anonIn_d_ready = widget_auto_anon_in_d_ready; // @[WidthWidget.scala:27:9] wire widget_anonIn_d_valid; // @[MixedNode.scala:551:17] wire [2:0] widget_anonIn_d_bits_opcode; // @[MixedNode.scala:551:17] wire [1:0] widget_anonIn_d_bits_param; // @[MixedNode.scala:551:17] wire [3:0] widget_anonIn_d_bits_size; // @[MixedNode.scala:551:17] wire widget_anonIn_d_bits_source; // @[MixedNode.scala:551:17] wire [2:0] widget_anonIn_d_bits_sink; // @[MixedNode.scala:551:17] wire widget_anonIn_d_bits_denied; // @[MixedNode.scala:551:17] wire [63:0] widget_anonIn_d_bits_data; // @[MixedNode.scala:551:17] wire widget_anonIn_d_bits_corrupt; // @[MixedNode.scala:551:17] wire widget_anonIn_e_ready; // @[MixedNode.scala:551:17] wire widget_anonIn_e_valid = widget_auto_anon_in_e_valid; // @[WidthWidget.scala:27:9] wire [2:0] widget_anonIn_e_bits_sink = widget_auto_anon_in_e_bits_sink; // @[WidthWidget.scala:27:9] wire widget_anonOut_a_ready = widget_auto_anon_out_a_ready; // @[WidthWidget.scala:27:9] wire widget_anonOut_a_valid; // @[MixedNode.scala:542:17] wire [2:0] widget_anonOut_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] widget_anonOut_a_bits_param; // @[MixedNode.scala:542:17] wire [3:0] widget_anonOut_a_bits_size; // @[MixedNode.scala:542:17] wire widget_anonOut_a_bits_source; // @[MixedNode.scala:542:17] wire [31:0] widget_anonOut_a_bits_address; // @[MixedNode.scala:542:17] wire [7:0] widget_anonOut_a_bits_mask; // @[MixedNode.scala:542:17] wire [63:0] widget_anonOut_a_bits_data; // @[MixedNode.scala:542:17] wire widget_anonOut_b_ready; // @[MixedNode.scala:542:17] wire widget_anonOut_b_valid = widget_auto_anon_out_b_valid; // 
@[WidthWidget.scala:27:9] wire [2:0] widget_anonOut_b_bits_opcode = widget_auto_anon_out_b_bits_opcode; // @[WidthWidget.scala:27:9] wire [1:0] widget_anonOut_b_bits_param = widget_auto_anon_out_b_bits_param; // @[WidthWidget.scala:27:9] wire [3:0] widget_anonOut_b_bits_size = widget_auto_anon_out_b_bits_size; // @[WidthWidget.scala:27:9] wire widget_anonOut_b_bits_source = widget_auto_anon_out_b_bits_source; // @[WidthWidget.scala:27:9] wire [31:0] widget_anonOut_b_bits_address = widget_auto_anon_out_b_bits_address; // @[WidthWidget.scala:27:9] wire [7:0] widget_anonOut_b_bits_mask = widget_auto_anon_out_b_bits_mask; // @[WidthWidget.scala:27:9] wire [63:0] widget_anonOut_b_bits_data = widget_auto_anon_out_b_bits_data; // @[WidthWidget.scala:27:9] wire widget_anonOut_b_bits_corrupt = widget_auto_anon_out_b_bits_corrupt; // @[WidthWidget.scala:27:9] wire widget_anonOut_c_ready = widget_auto_anon_out_c_ready; // @[WidthWidget.scala:27:9] wire widget_anonOut_c_valid; // @[MixedNode.scala:542:17] wire [2:0] widget_anonOut_c_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] widget_anonOut_c_bits_param; // @[MixedNode.scala:542:17] wire [3:0] widget_anonOut_c_bits_size; // @[MixedNode.scala:542:17] wire widget_anonOut_c_bits_source; // @[MixedNode.scala:542:17] wire [31:0] widget_anonOut_c_bits_address; // @[MixedNode.scala:542:17] wire [63:0] widget_anonOut_c_bits_data; // @[MixedNode.scala:542:17] wire widget_anonOut_d_ready; // @[MixedNode.scala:542:17] wire widget_anonOut_d_valid = widget_auto_anon_out_d_valid; // @[WidthWidget.scala:27:9] wire [2:0] widget_anonOut_d_bits_opcode = widget_auto_anon_out_d_bits_opcode; // @[WidthWidget.scala:27:9] wire [1:0] widget_anonOut_d_bits_param = widget_auto_anon_out_d_bits_param; // @[WidthWidget.scala:27:9] wire [3:0] widget_anonOut_d_bits_size = widget_auto_anon_out_d_bits_size; // @[WidthWidget.scala:27:9] wire widget_anonOut_d_bits_source = widget_auto_anon_out_d_bits_source; // @[WidthWidget.scala:27:9] wire [2:0] widget_anonOut_d_bits_sink = widget_auto_anon_out_d_bits_sink; // @[WidthWidget.scala:27:9] wire widget_anonOut_d_bits_denied = widget_auto_anon_out_d_bits_denied; // @[WidthWidget.scala:27:9] wire [63:0] widget_anonOut_d_bits_data = widget_auto_anon_out_d_bits_data; // @[WidthWidget.scala:27:9] wire widget_anonOut_d_bits_corrupt = widget_auto_anon_out_d_bits_corrupt; // @[WidthWidget.scala:27:9] wire widget_anonOut_e_ready = widget_auto_anon_out_e_ready; // @[WidthWidget.scala:27:9] wire widget_anonOut_e_valid; // @[MixedNode.scala:542:17] wire [2:0] widget_anonOut_e_bits_sink; // @[MixedNode.scala:542:17] wire widget_auto_anon_in_a_ready; // @[WidthWidget.scala:27:9] wire [2:0] widget_auto_anon_in_b_bits_opcode; // @[WidthWidget.scala:27:9] wire [1:0] widget_auto_anon_in_b_bits_param; // @[WidthWidget.scala:27:9] wire [3:0] widget_auto_anon_in_b_bits_size; // @[WidthWidget.scala:27:9] wire widget_auto_anon_in_b_bits_source; // @[WidthWidget.scala:27:9] wire [31:0] widget_auto_anon_in_b_bits_address; // @[WidthWidget.scala:27:9] wire [7:0] widget_auto_anon_in_b_bits_mask; // @[WidthWidget.scala:27:9] wire [63:0] widget_auto_anon_in_b_bits_data; // @[WidthWidget.scala:27:9] wire widget_auto_anon_in_b_bits_corrupt; // @[WidthWidget.scala:27:9] wire widget_auto_anon_in_b_valid; // @[WidthWidget.scala:27:9] wire widget_auto_anon_in_c_ready; // @[WidthWidget.scala:27:9] wire [2:0] widget_auto_anon_in_d_bits_opcode; // @[WidthWidget.scala:27:9] wire [1:0] widget_auto_anon_in_d_bits_param; // @[WidthWidget.scala:27:9] wire [3:0] 
widget_auto_anon_in_d_bits_size; // @[WidthWidget.scala:27:9] wire widget_auto_anon_in_d_bits_source; // @[WidthWidget.scala:27:9] wire [2:0] widget_auto_anon_in_d_bits_sink; // @[WidthWidget.scala:27:9] wire widget_auto_anon_in_d_bits_denied; // @[WidthWidget.scala:27:9] wire [63:0] widget_auto_anon_in_d_bits_data; // @[WidthWidget.scala:27:9] wire widget_auto_anon_in_d_bits_corrupt; // @[WidthWidget.scala:27:9] wire widget_auto_anon_in_d_valid; // @[WidthWidget.scala:27:9] wire widget_auto_anon_in_e_ready; // @[WidthWidget.scala:27:9] wire [2:0] widget_auto_anon_out_a_bits_opcode; // @[WidthWidget.scala:27:9] wire [2:0] widget_auto_anon_out_a_bits_param; // @[WidthWidget.scala:27:9] wire [3:0] widget_auto_anon_out_a_bits_size; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_a_bits_source; // @[WidthWidget.scala:27:9] wire [31:0] widget_auto_anon_out_a_bits_address; // @[WidthWidget.scala:27:9] wire [7:0] widget_auto_anon_out_a_bits_mask; // @[WidthWidget.scala:27:9] wire [63:0] widget_auto_anon_out_a_bits_data; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_a_valid; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_b_ready; // @[WidthWidget.scala:27:9] wire [2:0] widget_auto_anon_out_c_bits_opcode; // @[WidthWidget.scala:27:9] wire [2:0] widget_auto_anon_out_c_bits_param; // @[WidthWidget.scala:27:9] wire [3:0] widget_auto_anon_out_c_bits_size; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_c_bits_source; // @[WidthWidget.scala:27:9] wire [31:0] widget_auto_anon_out_c_bits_address; // @[WidthWidget.scala:27:9] wire [63:0] widget_auto_anon_out_c_bits_data; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_c_valid; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_d_ready; // @[WidthWidget.scala:27:9] wire [2:0] widget_auto_anon_out_e_bits_sink; // @[WidthWidget.scala:27:9] wire widget_auto_anon_out_e_valid; // @[WidthWidget.scala:27:9] assign widget_anonIn_a_ready = widget_anonOut_a_ready; // @[MixedNode.scala:542:17, :551:17] assign widget_auto_anon_out_a_valid = widget_anonOut_a_valid; // @[WidthWidget.scala:27:9] assign widget_auto_anon_out_a_bits_opcode = widget_anonOut_a_bits_opcode; // @[WidthWidget.scala:27:9] assign widget_auto_anon_out_a_bits_param = widget_anonOut_a_bits_param; // @[WidthWidget.scala:27:9] assign widget_auto_anon_out_a_bits_size = widget_anonOut_a_bits_size; // @[WidthWidget.scala:27:9] assign widget_auto_anon_out_a_bits_source = widget_anonOut_a_bits_source; // @[WidthWidget.scala:27:9] assign widget_auto_anon_out_a_bits_address = widget_anonOut_a_bits_address; // @[WidthWidget.scala:27:9] assign widget_auto_anon_out_a_bits_mask = widget_anonOut_a_bits_mask; // @[WidthWidget.scala:27:9] assign widget_auto_anon_out_a_bits_data = widget_anonOut_a_bits_data; // @[WidthWidget.scala:27:9] assign widget_auto_anon_out_b_ready = widget_anonOut_b_ready; // @[WidthWidget.scala:27:9] assign widget_anonIn_b_valid = widget_anonOut_b_valid; // @[MixedNode.scala:542:17, :551:17] assign widget_anonIn_b_bits_opcode = widget_anonOut_b_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign widget_anonIn_b_bits_param = widget_anonOut_b_bits_param; // @[MixedNode.scala:542:17, :551:17] assign widget_anonIn_b_bits_size = widget_anonOut_b_bits_size; // @[MixedNode.scala:542:17, :551:17] assign widget_anonIn_b_bits_source = widget_anonOut_b_bits_source; // @[MixedNode.scala:542:17, :551:17] assign widget_anonIn_b_bits_address = widget_anonOut_b_bits_address; // @[MixedNode.scala:542:17, :551:17] assign widget_anonIn_b_bits_mask = 
widget_anonOut_b_bits_mask; // @[MixedNode.scala:542:17, :551:17] assign widget_anonIn_b_bits_data = widget_anonOut_b_bits_data; // @[MixedNode.scala:542:17, :551:17] assign widget_anonIn_b_bits_corrupt = widget_anonOut_b_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] assign widget_anonIn_c_ready = widget_anonOut_c_ready; // @[MixedNode.scala:542:17, :551:17] assign widget_auto_anon_out_c_valid = widget_anonOut_c_valid; // @[WidthWidget.scala:27:9] assign widget_auto_anon_out_c_bits_opcode = widget_anonOut_c_bits_opcode; // @[WidthWidget.scala:27:9] assign widget_auto_anon_out_c_bits_param = widget_anonOut_c_bits_param; // @[WidthWidget.scala:27:9] assign widget_auto_anon_out_c_bits_size = widget_anonOut_c_bits_size; // @[WidthWidget.scala:27:9] assign widget_auto_anon_out_c_bits_source = widget_anonOut_c_bits_source; // @[WidthWidget.scala:27:9] assign widget_auto_anon_out_c_bits_address = widget_anonOut_c_bits_address; // @[WidthWidget.scala:27:9] assign widget_auto_anon_out_c_bits_data = widget_anonOut_c_bits_data; // @[WidthWidget.scala:27:9] assign widget_auto_anon_out_d_ready = widget_anonOut_d_ready; // @[WidthWidget.scala:27:9] assign widget_anonIn_d_valid = widget_anonOut_d_valid; // @[MixedNode.scala:542:17, :551:17] assign widget_anonIn_d_bits_opcode = widget_anonOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign widget_anonIn_d_bits_param = widget_anonOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17] assign widget_anonIn_d_bits_size = widget_anonOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17] assign widget_anonIn_d_bits_source = widget_anonOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17] assign widget_anonIn_d_bits_sink = widget_anonOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17] assign widget_anonIn_d_bits_denied = widget_anonOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17] assign widget_anonIn_d_bits_data = widget_anonOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17] assign widget_anonIn_d_bits_corrupt = widget_anonOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] assign widget_anonIn_e_ready = widget_anonOut_e_ready; // @[MixedNode.scala:542:17, :551:17] assign widget_auto_anon_out_e_valid = widget_anonOut_e_valid; // @[WidthWidget.scala:27:9] assign widget_auto_anon_out_e_bits_sink = widget_anonOut_e_bits_sink; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_a_ready = widget_anonIn_a_ready; // @[WidthWidget.scala:27:9] assign widget_anonOut_a_valid = widget_anonIn_a_valid; // @[MixedNode.scala:542:17, :551:17] assign widget_anonOut_a_bits_opcode = widget_anonIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign widget_anonOut_a_bits_param = widget_anonIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17] assign widget_anonOut_a_bits_size = widget_anonIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17] assign widget_anonOut_a_bits_source = widget_anonIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17] assign widget_anonOut_a_bits_address = widget_anonIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17] assign widget_anonOut_a_bits_mask = widget_anonIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17] assign widget_anonOut_a_bits_data = widget_anonIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17] assign widget_anonOut_b_ready = widget_anonIn_b_ready; // @[MixedNode.scala:542:17, :551:17] assign widget_auto_anon_in_b_valid = widget_anonIn_b_valid; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_b_bits_opcode = widget_anonIn_b_bits_opcode; // @[WidthWidget.scala:27:9] 
assign widget_auto_anon_in_b_bits_param = widget_anonIn_b_bits_param; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_b_bits_size = widget_anonIn_b_bits_size; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_b_bits_source = widget_anonIn_b_bits_source; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_b_bits_address = widget_anonIn_b_bits_address; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_b_bits_mask = widget_anonIn_b_bits_mask; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_b_bits_data = widget_anonIn_b_bits_data; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_b_bits_corrupt = widget_anonIn_b_bits_corrupt; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_c_ready = widget_anonIn_c_ready; // @[WidthWidget.scala:27:9] assign widget_anonOut_c_valid = widget_anonIn_c_valid; // @[MixedNode.scala:542:17, :551:17] assign widget_anonOut_c_bits_opcode = widget_anonIn_c_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign widget_anonOut_c_bits_param = widget_anonIn_c_bits_param; // @[MixedNode.scala:542:17, :551:17] assign widget_anonOut_c_bits_size = widget_anonIn_c_bits_size; // @[MixedNode.scala:542:17, :551:17] assign widget_anonOut_c_bits_source = widget_anonIn_c_bits_source; // @[MixedNode.scala:542:17, :551:17] assign widget_anonOut_c_bits_address = widget_anonIn_c_bits_address; // @[MixedNode.scala:542:17, :551:17] assign widget_anonOut_c_bits_data = widget_anonIn_c_bits_data; // @[MixedNode.scala:542:17, :551:17] assign widget_anonOut_d_ready = widget_anonIn_d_ready; // @[MixedNode.scala:542:17, :551:17] assign widget_auto_anon_in_d_valid = widget_anonIn_d_valid; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_d_bits_opcode = widget_anonIn_d_bits_opcode; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_d_bits_param = widget_anonIn_d_bits_param; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_d_bits_size = widget_anonIn_d_bits_size; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_d_bits_source = widget_anonIn_d_bits_source; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_d_bits_sink = widget_anonIn_d_bits_sink; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_d_bits_denied = widget_anonIn_d_bits_denied; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_d_bits_data = widget_anonIn_d_bits_data; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_d_bits_corrupt = widget_anonIn_d_bits_corrupt; // @[WidthWidget.scala:27:9] assign widget_auto_anon_in_e_ready = widget_anonIn_e_ready; // @[WidthWidget.scala:27:9] assign widget_anonOut_e_valid = widget_anonIn_e_valid; // @[MixedNode.scala:542:17, :551:17] assign widget_anonOut_e_bits_sink = widget_anonIn_e_bits_sink; // @[MixedNode.scala:542:17, :551:17] wire widget_1_anonIn_a_ready; // @[MixedNode.scala:551:17] wire widget_1_anonIn_a_valid = widget_1_auto_anon_in_a_valid; // @[WidthWidget.scala:27:9] wire [31:0] widget_1_anonIn_a_bits_address = widget_1_auto_anon_in_a_bits_address; // @[WidthWidget.scala:27:9] wire widget_1_anonIn_d_valid; // @[MixedNode.scala:551:17] wire [2:0] widget_1_anonIn_d_bits_opcode; // @[MixedNode.scala:551:17] wire [1:0] widget_1_anonIn_d_bits_param; // @[MixedNode.scala:551:17] wire [3:0] widget_1_anonIn_d_bits_size; // @[MixedNode.scala:551:17] wire [2:0] widget_1_anonIn_d_bits_sink; // @[MixedNode.scala:551:17] wire widget_1_anonIn_d_bits_denied; // @[MixedNode.scala:551:17] wire [63:0] widget_1_anonIn_d_bits_data; // @[MixedNode.scala:551:17] wire widget_1_anonIn_d_bits_corrupt; // 
@[MixedNode.scala:551:17] wire widget_1_anonOut_a_ready = widget_1_auto_anon_out_a_ready; // @[WidthWidget.scala:27:9] wire widget_1_anonOut_a_valid; // @[MixedNode.scala:542:17] wire [31:0] widget_1_anonOut_a_bits_address; // @[MixedNode.scala:542:17] wire widget_1_anonOut_d_valid = widget_1_auto_anon_out_d_valid; // @[WidthWidget.scala:27:9] wire [2:0] widget_1_anonOut_d_bits_opcode = widget_1_auto_anon_out_d_bits_opcode; // @[WidthWidget.scala:27:9] wire [1:0] widget_1_anonOut_d_bits_param = widget_1_auto_anon_out_d_bits_param; // @[WidthWidget.scala:27:9] wire [3:0] widget_1_anonOut_d_bits_size = widget_1_auto_anon_out_d_bits_size; // @[WidthWidget.scala:27:9] wire [2:0] widget_1_anonOut_d_bits_sink = widget_1_auto_anon_out_d_bits_sink; // @[WidthWidget.scala:27:9] wire widget_1_anonOut_d_bits_denied = widget_1_auto_anon_out_d_bits_denied; // @[WidthWidget.scala:27:9] wire [63:0] widget_1_anonOut_d_bits_data = widget_1_auto_anon_out_d_bits_data; // @[WidthWidget.scala:27:9] wire widget_1_anonOut_d_bits_corrupt = widget_1_auto_anon_out_d_bits_corrupt; // @[WidthWidget.scala:27:9] wire widget_1_auto_anon_in_a_ready; // @[WidthWidget.scala:27:9] wire [2:0] widget_1_auto_anon_in_d_bits_opcode; // @[WidthWidget.scala:27:9] wire [1:0] widget_1_auto_anon_in_d_bits_param; // @[WidthWidget.scala:27:9] wire [3:0] widget_1_auto_anon_in_d_bits_size; // @[WidthWidget.scala:27:9] wire [2:0] widget_1_auto_anon_in_d_bits_sink; // @[WidthWidget.scala:27:9] wire widget_1_auto_anon_in_d_bits_denied; // @[WidthWidget.scala:27:9] wire [63:0] widget_1_auto_anon_in_d_bits_data; // @[WidthWidget.scala:27:9] wire widget_1_auto_anon_in_d_bits_corrupt; // @[WidthWidget.scala:27:9] wire widget_1_auto_anon_in_d_valid; // @[WidthWidget.scala:27:9] wire [31:0] widget_1_auto_anon_out_a_bits_address; // @[WidthWidget.scala:27:9] wire widget_1_auto_anon_out_a_valid; // @[WidthWidget.scala:27:9] assign widget_1_anonIn_a_ready = widget_1_anonOut_a_ready; // @[MixedNode.scala:542:17, :551:17] assign widget_1_auto_anon_out_a_valid = widget_1_anonOut_a_valid; // @[WidthWidget.scala:27:9] assign widget_1_auto_anon_out_a_bits_address = widget_1_anonOut_a_bits_address; // @[WidthWidget.scala:27:9] assign widget_1_anonIn_d_valid = widget_1_anonOut_d_valid; // @[MixedNode.scala:542:17, :551:17] assign widget_1_anonIn_d_bits_opcode = widget_1_anonOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign widget_1_anonIn_d_bits_param = widget_1_anonOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17] assign widget_1_anonIn_d_bits_size = widget_1_anonOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17] assign widget_1_anonIn_d_bits_sink = widget_1_anonOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17] assign widget_1_anonIn_d_bits_denied = widget_1_anonOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17] assign widget_1_anonIn_d_bits_data = widget_1_anonOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17] assign widget_1_anonIn_d_bits_corrupt = widget_1_anonOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] assign widget_1_auto_anon_in_a_ready = widget_1_anonIn_a_ready; // @[WidthWidget.scala:27:9] assign widget_1_anonOut_a_valid = widget_1_anonIn_a_valid; // @[MixedNode.scala:542:17, :551:17] assign widget_1_anonOut_a_bits_address = widget_1_anonIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17] assign widget_1_auto_anon_in_d_valid = widget_1_anonIn_d_valid; // @[WidthWidget.scala:27:9] assign widget_1_auto_anon_in_d_bits_opcode = widget_1_anonIn_d_bits_opcode; // @[WidthWidget.scala:27:9] assign 
widget_1_auto_anon_in_d_bits_param = widget_1_anonIn_d_bits_param; // @[WidthWidget.scala:27:9] assign widget_1_auto_anon_in_d_bits_size = widget_1_anonIn_d_bits_size; // @[WidthWidget.scala:27:9] assign widget_1_auto_anon_in_d_bits_sink = widget_1_anonIn_d_bits_sink; // @[WidthWidget.scala:27:9] assign widget_1_auto_anon_in_d_bits_denied = widget_1_anonIn_d_bits_denied; // @[WidthWidget.scala:27:9] assign widget_1_auto_anon_in_d_bits_data = widget_1_anonIn_d_bits_data; // @[WidthWidget.scala:27:9] assign widget_1_auto_anon_in_d_bits_corrupt = widget_1_anonIn_d_bits_corrupt; // @[WidthWidget.scala:27:9] wire buffer_nodeIn_a_ready; // @[MixedNode.scala:551:17] wire tlOtherMastersNodeOut_a_ready = buffer_auto_in_a_ready; // @[Buffer.scala:40:9] wire tlOtherMastersNodeOut_a_valid; // @[MixedNode.scala:542:17] wire buffer_nodeIn_a_valid = buffer_auto_in_a_valid; // @[Buffer.scala:40:9] wire [2:0] tlOtherMastersNodeOut_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] buffer_nodeIn_a_bits_opcode = buffer_auto_in_a_bits_opcode; // @[Buffer.scala:40:9] wire [2:0] tlOtherMastersNodeOut_a_bits_param; // @[MixedNode.scala:542:17] wire [2:0] buffer_nodeIn_a_bits_param = buffer_auto_in_a_bits_param; // @[Buffer.scala:40:9] wire [3:0] tlOtherMastersNodeOut_a_bits_size; // @[MixedNode.scala:542:17] wire [3:0] buffer_nodeIn_a_bits_size = buffer_auto_in_a_bits_size; // @[Buffer.scala:40:9] wire [1:0] tlOtherMastersNodeOut_a_bits_source; // @[MixedNode.scala:542:17] wire [1:0] buffer_nodeIn_a_bits_source = buffer_auto_in_a_bits_source; // @[Buffer.scala:40:9] wire [31:0] tlOtherMastersNodeOut_a_bits_address; // @[MixedNode.scala:542:17] wire [31:0] buffer_nodeIn_a_bits_address = buffer_auto_in_a_bits_address; // @[Buffer.scala:40:9] wire [7:0] tlOtherMastersNodeOut_a_bits_mask; // @[MixedNode.scala:542:17] wire [7:0] buffer_nodeIn_a_bits_mask = buffer_auto_in_a_bits_mask; // @[Buffer.scala:40:9] wire [63:0] tlOtherMastersNodeOut_a_bits_data; // @[MixedNode.scala:542:17] wire [63:0] buffer_nodeIn_a_bits_data = buffer_auto_in_a_bits_data; // @[Buffer.scala:40:9] wire tlOtherMastersNodeOut_b_ready; // @[MixedNode.scala:542:17] wire buffer_nodeIn_b_ready = buffer_auto_in_b_ready; // @[Buffer.scala:40:9] wire buffer_nodeIn_b_valid; // @[MixedNode.scala:551:17] wire [2:0] buffer_nodeIn_b_bits_opcode; // @[MixedNode.scala:551:17] wire tlOtherMastersNodeOut_b_valid = buffer_auto_in_b_valid; // @[Buffer.scala:40:9] wire [1:0] buffer_nodeIn_b_bits_param; // @[MixedNode.scala:551:17] wire [2:0] tlOtherMastersNodeOut_b_bits_opcode = buffer_auto_in_b_bits_opcode; // @[Buffer.scala:40:9] wire [3:0] buffer_nodeIn_b_bits_size; // @[MixedNode.scala:551:17] wire [1:0] tlOtherMastersNodeOut_b_bits_param = buffer_auto_in_b_bits_param; // @[Buffer.scala:40:9] wire [1:0] buffer_nodeIn_b_bits_source; // @[MixedNode.scala:551:17] wire [3:0] tlOtherMastersNodeOut_b_bits_size = buffer_auto_in_b_bits_size; // @[Buffer.scala:40:9] wire [31:0] buffer_nodeIn_b_bits_address; // @[MixedNode.scala:551:17] wire [1:0] tlOtherMastersNodeOut_b_bits_source = buffer_auto_in_b_bits_source; // @[Buffer.scala:40:9] wire [7:0] buffer_nodeIn_b_bits_mask; // @[MixedNode.scala:551:17] wire [31:0] tlOtherMastersNodeOut_b_bits_address = buffer_auto_in_b_bits_address; // @[Buffer.scala:40:9] wire [63:0] buffer_nodeIn_b_bits_data; // @[MixedNode.scala:551:17] wire [7:0] tlOtherMastersNodeOut_b_bits_mask = buffer_auto_in_b_bits_mask; // @[Buffer.scala:40:9] wire buffer_nodeIn_b_bits_corrupt; // @[MixedNode.scala:551:17] wire [63:0] 
tlOtherMastersNodeOut_b_bits_data = buffer_auto_in_b_bits_data; // @[Buffer.scala:40:9] wire buffer_nodeIn_c_ready; // @[MixedNode.scala:551:17] wire tlOtherMastersNodeOut_b_bits_corrupt = buffer_auto_in_b_bits_corrupt; // @[Buffer.scala:40:9] wire tlOtherMastersNodeOut_c_ready = buffer_auto_in_c_ready; // @[Buffer.scala:40:9] wire tlOtherMastersNodeOut_c_valid; // @[MixedNode.scala:542:17] wire buffer_nodeIn_c_valid = buffer_auto_in_c_valid; // @[Buffer.scala:40:9] wire [2:0] tlOtherMastersNodeOut_c_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] buffer_nodeIn_c_bits_opcode = buffer_auto_in_c_bits_opcode; // @[Buffer.scala:40:9] wire [2:0] tlOtherMastersNodeOut_c_bits_param; // @[MixedNode.scala:542:17] wire [2:0] buffer_nodeIn_c_bits_param = buffer_auto_in_c_bits_param; // @[Buffer.scala:40:9] wire [3:0] tlOtherMastersNodeOut_c_bits_size; // @[MixedNode.scala:542:17] wire [3:0] buffer_nodeIn_c_bits_size = buffer_auto_in_c_bits_size; // @[Buffer.scala:40:9] wire [1:0] tlOtherMastersNodeOut_c_bits_source; // @[MixedNode.scala:542:17] wire [1:0] buffer_nodeIn_c_bits_source = buffer_auto_in_c_bits_source; // @[Buffer.scala:40:9] wire [31:0] tlOtherMastersNodeOut_c_bits_address; // @[MixedNode.scala:542:17] wire [31:0] buffer_nodeIn_c_bits_address = buffer_auto_in_c_bits_address; // @[Buffer.scala:40:9] wire [63:0] tlOtherMastersNodeOut_c_bits_data; // @[MixedNode.scala:542:17] wire [63:0] buffer_nodeIn_c_bits_data = buffer_auto_in_c_bits_data; // @[Buffer.scala:40:9] wire tlOtherMastersNodeOut_d_ready; // @[MixedNode.scala:542:17] wire buffer_nodeIn_d_ready = buffer_auto_in_d_ready; // @[Buffer.scala:40:9] wire buffer_nodeIn_d_valid; // @[MixedNode.scala:551:17] wire [2:0] buffer_nodeIn_d_bits_opcode; // @[MixedNode.scala:551:17] wire tlOtherMastersNodeOut_d_valid = buffer_auto_in_d_valid; // @[Buffer.scala:40:9] wire [1:0] buffer_nodeIn_d_bits_param; // @[MixedNode.scala:551:17] wire [2:0] tlOtherMastersNodeOut_d_bits_opcode = buffer_auto_in_d_bits_opcode; // @[Buffer.scala:40:9] wire [3:0] buffer_nodeIn_d_bits_size; // @[MixedNode.scala:551:17] wire [1:0] tlOtherMastersNodeOut_d_bits_param = buffer_auto_in_d_bits_param; // @[Buffer.scala:40:9] wire [1:0] buffer_nodeIn_d_bits_source; // @[MixedNode.scala:551:17] wire [3:0] tlOtherMastersNodeOut_d_bits_size = buffer_auto_in_d_bits_size; // @[Buffer.scala:40:9] wire [2:0] buffer_nodeIn_d_bits_sink; // @[MixedNode.scala:551:17] wire [1:0] tlOtherMastersNodeOut_d_bits_source = buffer_auto_in_d_bits_source; // @[Buffer.scala:40:9] wire buffer_nodeIn_d_bits_denied; // @[MixedNode.scala:551:17] wire [2:0] tlOtherMastersNodeOut_d_bits_sink = buffer_auto_in_d_bits_sink; // @[Buffer.scala:40:9] wire [63:0] buffer_nodeIn_d_bits_data; // @[MixedNode.scala:551:17] wire tlOtherMastersNodeOut_d_bits_denied = buffer_auto_in_d_bits_denied; // @[Buffer.scala:40:9] wire buffer_nodeIn_d_bits_corrupt; // @[MixedNode.scala:551:17] wire [63:0] tlOtherMastersNodeOut_d_bits_data = buffer_auto_in_d_bits_data; // @[Buffer.scala:40:9] wire buffer_nodeIn_e_ready; // @[MixedNode.scala:551:17] wire tlOtherMastersNodeOut_d_bits_corrupt = buffer_auto_in_d_bits_corrupt; // @[Buffer.scala:40:9] wire tlOtherMastersNodeOut_e_ready = buffer_auto_in_e_ready; // @[Buffer.scala:40:9] wire tlOtherMastersNodeOut_e_valid; // @[MixedNode.scala:542:17] wire buffer_nodeIn_e_valid = buffer_auto_in_e_valid; // @[Buffer.scala:40:9] wire [2:0] tlOtherMastersNodeOut_e_bits_sink; // @[MixedNode.scala:542:17] wire [2:0] buffer_nodeIn_e_bits_sink = buffer_auto_in_e_bits_sink; // 
@[Buffer.scala:40:9] wire buffer_nodeOut_a_ready = buffer_auto_out_a_ready; // @[Buffer.scala:40:9] wire buffer_nodeOut_a_valid; // @[MixedNode.scala:542:17] assign auto_buffer_out_a_valid_0 = buffer_auto_out_a_valid; // @[Buffer.scala:40:9] wire [2:0] buffer_nodeOut_a_bits_opcode; // @[MixedNode.scala:542:17] assign auto_buffer_out_a_bits_opcode_0 = buffer_auto_out_a_bits_opcode; // @[Buffer.scala:40:9] wire [2:0] buffer_nodeOut_a_bits_param; // @[MixedNode.scala:542:17] assign auto_buffer_out_a_bits_param_0 = buffer_auto_out_a_bits_param; // @[Buffer.scala:40:9] wire [3:0] buffer_nodeOut_a_bits_size; // @[MixedNode.scala:542:17] assign auto_buffer_out_a_bits_size_0 = buffer_auto_out_a_bits_size; // @[Buffer.scala:40:9] wire [1:0] buffer_nodeOut_a_bits_source; // @[MixedNode.scala:542:17] assign auto_buffer_out_a_bits_source_0 = buffer_auto_out_a_bits_source; // @[Buffer.scala:40:9] wire [31:0] buffer_nodeOut_a_bits_address; // @[MixedNode.scala:542:17] assign auto_buffer_out_a_bits_address_0 = buffer_auto_out_a_bits_address; // @[Buffer.scala:40:9] wire [7:0] buffer_nodeOut_a_bits_mask; // @[MixedNode.scala:542:17] assign auto_buffer_out_a_bits_mask_0 = buffer_auto_out_a_bits_mask; // @[Buffer.scala:40:9] wire [63:0] buffer_nodeOut_a_bits_data; // @[MixedNode.scala:542:17] assign auto_buffer_out_a_bits_data_0 = buffer_auto_out_a_bits_data; // @[Buffer.scala:40:9] wire buffer_nodeOut_b_ready; // @[MixedNode.scala:542:17] assign auto_buffer_out_b_ready_0 = buffer_auto_out_b_ready; // @[Buffer.scala:40:9] wire buffer_nodeOut_b_valid = buffer_auto_out_b_valid; // @[Buffer.scala:40:9] wire [2:0] buffer_nodeOut_b_bits_opcode = buffer_auto_out_b_bits_opcode; // @[Buffer.scala:40:9] wire [1:0] buffer_nodeOut_b_bits_param = buffer_auto_out_b_bits_param; // @[Buffer.scala:40:9] wire [3:0] buffer_nodeOut_b_bits_size = buffer_auto_out_b_bits_size; // @[Buffer.scala:40:9] wire [1:0] buffer_nodeOut_b_bits_source = buffer_auto_out_b_bits_source; // @[Buffer.scala:40:9] wire [31:0] buffer_nodeOut_b_bits_address = buffer_auto_out_b_bits_address; // @[Buffer.scala:40:9] wire [7:0] buffer_nodeOut_b_bits_mask = buffer_auto_out_b_bits_mask; // @[Buffer.scala:40:9] wire [63:0] buffer_nodeOut_b_bits_data = buffer_auto_out_b_bits_data; // @[Buffer.scala:40:9] wire buffer_nodeOut_b_bits_corrupt = buffer_auto_out_b_bits_corrupt; // @[Buffer.scala:40:9] wire buffer_nodeOut_c_ready = buffer_auto_out_c_ready; // @[Buffer.scala:40:9] wire buffer_nodeOut_c_valid; // @[MixedNode.scala:542:17] assign auto_buffer_out_c_valid_0 = buffer_auto_out_c_valid; // @[Buffer.scala:40:9] wire [2:0] buffer_nodeOut_c_bits_opcode; // @[MixedNode.scala:542:17] assign auto_buffer_out_c_bits_opcode_0 = buffer_auto_out_c_bits_opcode; // @[Buffer.scala:40:9] wire [2:0] buffer_nodeOut_c_bits_param; // @[MixedNode.scala:542:17] assign auto_buffer_out_c_bits_param_0 = buffer_auto_out_c_bits_param; // @[Buffer.scala:40:9] wire [3:0] buffer_nodeOut_c_bits_size; // @[MixedNode.scala:542:17] assign auto_buffer_out_c_bits_size_0 = buffer_auto_out_c_bits_size; // @[Buffer.scala:40:9] wire [1:0] buffer_nodeOut_c_bits_source; // @[MixedNode.scala:542:17] assign auto_buffer_out_c_bits_source_0 = buffer_auto_out_c_bits_source; // @[Buffer.scala:40:9] wire [31:0] buffer_nodeOut_c_bits_address; // @[MixedNode.scala:542:17] assign auto_buffer_out_c_bits_address_0 = buffer_auto_out_c_bits_address; // @[Buffer.scala:40:9] wire [63:0] buffer_nodeOut_c_bits_data; // @[MixedNode.scala:542:17] assign auto_buffer_out_c_bits_data_0 = buffer_auto_out_c_bits_data; 
// @[Buffer.scala:40:9] wire buffer_nodeOut_d_ready; // @[MixedNode.scala:542:17] assign auto_buffer_out_d_ready_0 = buffer_auto_out_d_ready; // @[Buffer.scala:40:9] wire buffer_nodeOut_d_valid = buffer_auto_out_d_valid; // @[Buffer.scala:40:9] wire [2:0] buffer_nodeOut_d_bits_opcode = buffer_auto_out_d_bits_opcode; // @[Buffer.scala:40:9] wire [1:0] buffer_nodeOut_d_bits_param = buffer_auto_out_d_bits_param; // @[Buffer.scala:40:9] wire [3:0] buffer_nodeOut_d_bits_size = buffer_auto_out_d_bits_size; // @[Buffer.scala:40:9] wire [1:0] buffer_nodeOut_d_bits_source = buffer_auto_out_d_bits_source; // @[Buffer.scala:40:9] wire [2:0] buffer_nodeOut_d_bits_sink = buffer_auto_out_d_bits_sink; // @[Buffer.scala:40:9] wire buffer_nodeOut_d_bits_denied = buffer_auto_out_d_bits_denied; // @[Buffer.scala:40:9] wire [63:0] buffer_nodeOut_d_bits_data = buffer_auto_out_d_bits_data; // @[Buffer.scala:40:9] wire buffer_nodeOut_d_bits_corrupt = buffer_auto_out_d_bits_corrupt; // @[Buffer.scala:40:9] wire buffer_nodeOut_e_ready = buffer_auto_out_e_ready; // @[Buffer.scala:40:9] wire buffer_nodeOut_e_valid; // @[MixedNode.scala:542:17] assign auto_buffer_out_e_valid_0 = buffer_auto_out_e_valid; // @[Buffer.scala:40:9] wire [2:0] buffer_nodeOut_e_bits_sink; // @[MixedNode.scala:542:17] assign auto_buffer_out_e_bits_sink_0 = buffer_auto_out_e_bits_sink; // @[Buffer.scala:40:9] assign buffer_nodeIn_a_ready = buffer_nodeOut_a_ready; // @[MixedNode.scala:542:17, :551:17] assign buffer_auto_out_a_valid = buffer_nodeOut_a_valid; // @[Buffer.scala:40:9] assign buffer_auto_out_a_bits_opcode = buffer_nodeOut_a_bits_opcode; // @[Buffer.scala:40:9] assign buffer_auto_out_a_bits_param = buffer_nodeOut_a_bits_param; // @[Buffer.scala:40:9] assign buffer_auto_out_a_bits_size = buffer_nodeOut_a_bits_size; // @[Buffer.scala:40:9] assign buffer_auto_out_a_bits_source = buffer_nodeOut_a_bits_source; // @[Buffer.scala:40:9] assign buffer_auto_out_a_bits_address = buffer_nodeOut_a_bits_address; // @[Buffer.scala:40:9] assign buffer_auto_out_a_bits_mask = buffer_nodeOut_a_bits_mask; // @[Buffer.scala:40:9] assign buffer_auto_out_a_bits_data = buffer_nodeOut_a_bits_data; // @[Buffer.scala:40:9] assign buffer_auto_out_b_ready = buffer_nodeOut_b_ready; // @[Buffer.scala:40:9] assign buffer_nodeIn_b_valid = buffer_nodeOut_b_valid; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_b_bits_opcode = buffer_nodeOut_b_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_b_bits_param = buffer_nodeOut_b_bits_param; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_b_bits_size = buffer_nodeOut_b_bits_size; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_b_bits_source = buffer_nodeOut_b_bits_source; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_b_bits_address = buffer_nodeOut_b_bits_address; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_b_bits_mask = buffer_nodeOut_b_bits_mask; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_b_bits_data = buffer_nodeOut_b_bits_data; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_b_bits_corrupt = buffer_nodeOut_b_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_c_ready = buffer_nodeOut_c_ready; // @[MixedNode.scala:542:17, :551:17] assign buffer_auto_out_c_valid = buffer_nodeOut_c_valid; // @[Buffer.scala:40:9] assign buffer_auto_out_c_bits_opcode = buffer_nodeOut_c_bits_opcode; // @[Buffer.scala:40:9] assign buffer_auto_out_c_bits_param = buffer_nodeOut_c_bits_param; // @[Buffer.scala:40:9] 
assign buffer_auto_out_c_bits_size = buffer_nodeOut_c_bits_size; // @[Buffer.scala:40:9] assign buffer_auto_out_c_bits_source = buffer_nodeOut_c_bits_source; // @[Buffer.scala:40:9] assign buffer_auto_out_c_bits_address = buffer_nodeOut_c_bits_address; // @[Buffer.scala:40:9] assign buffer_auto_out_c_bits_data = buffer_nodeOut_c_bits_data; // @[Buffer.scala:40:9] assign buffer_auto_out_d_ready = buffer_nodeOut_d_ready; // @[Buffer.scala:40:9] assign buffer_nodeIn_d_valid = buffer_nodeOut_d_valid; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_d_bits_opcode = buffer_nodeOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_d_bits_param = buffer_nodeOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_d_bits_size = buffer_nodeOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_d_bits_source = buffer_nodeOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_d_bits_sink = buffer_nodeOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_d_bits_denied = buffer_nodeOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_d_bits_data = buffer_nodeOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_d_bits_corrupt = buffer_nodeOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_e_ready = buffer_nodeOut_e_ready; // @[MixedNode.scala:542:17, :551:17] assign buffer_auto_out_e_valid = buffer_nodeOut_e_valid; // @[Buffer.scala:40:9] assign buffer_auto_out_e_bits_sink = buffer_nodeOut_e_bits_sink; // @[Buffer.scala:40:9] assign buffer_auto_in_a_ready = buffer_nodeIn_a_ready; // @[Buffer.scala:40:9] assign buffer_nodeOut_a_valid = buffer_nodeIn_a_valid; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_a_bits_opcode = buffer_nodeIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_a_bits_param = buffer_nodeIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_a_bits_size = buffer_nodeIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_a_bits_source = buffer_nodeIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_a_bits_address = buffer_nodeIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_a_bits_mask = buffer_nodeIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_a_bits_data = buffer_nodeIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_b_ready = buffer_nodeIn_b_ready; // @[MixedNode.scala:542:17, :551:17] assign buffer_auto_in_b_valid = buffer_nodeIn_b_valid; // @[Buffer.scala:40:9] assign buffer_auto_in_b_bits_opcode = buffer_nodeIn_b_bits_opcode; // @[Buffer.scala:40:9] assign buffer_auto_in_b_bits_param = buffer_nodeIn_b_bits_param; // @[Buffer.scala:40:9] assign buffer_auto_in_b_bits_size = buffer_nodeIn_b_bits_size; // @[Buffer.scala:40:9] assign buffer_auto_in_b_bits_source = buffer_nodeIn_b_bits_source; // @[Buffer.scala:40:9] assign buffer_auto_in_b_bits_address = buffer_nodeIn_b_bits_address; // @[Buffer.scala:40:9] assign buffer_auto_in_b_bits_mask = buffer_nodeIn_b_bits_mask; // @[Buffer.scala:40:9] assign buffer_auto_in_b_bits_data = buffer_nodeIn_b_bits_data; // @[Buffer.scala:40:9] assign buffer_auto_in_b_bits_corrupt = buffer_nodeIn_b_bits_corrupt; // @[Buffer.scala:40:9] assign buffer_auto_in_c_ready = buffer_nodeIn_c_ready; // @[Buffer.scala:40:9] assign buffer_nodeOut_c_valid = buffer_nodeIn_c_valid; // 
@[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_c_bits_opcode = buffer_nodeIn_c_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_c_bits_param = buffer_nodeIn_c_bits_param; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_c_bits_size = buffer_nodeIn_c_bits_size; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_c_bits_source = buffer_nodeIn_c_bits_source; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_c_bits_address = buffer_nodeIn_c_bits_address; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_c_bits_data = buffer_nodeIn_c_bits_data; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_d_ready = buffer_nodeIn_d_ready; // @[MixedNode.scala:542:17, :551:17] assign buffer_auto_in_d_valid = buffer_nodeIn_d_valid; // @[Buffer.scala:40:9] assign buffer_auto_in_d_bits_opcode = buffer_nodeIn_d_bits_opcode; // @[Buffer.scala:40:9] assign buffer_auto_in_d_bits_param = buffer_nodeIn_d_bits_param; // @[Buffer.scala:40:9] assign buffer_auto_in_d_bits_size = buffer_nodeIn_d_bits_size; // @[Buffer.scala:40:9] assign buffer_auto_in_d_bits_source = buffer_nodeIn_d_bits_source; // @[Buffer.scala:40:9] assign buffer_auto_in_d_bits_sink = buffer_nodeIn_d_bits_sink; // @[Buffer.scala:40:9] assign buffer_auto_in_d_bits_denied = buffer_nodeIn_d_bits_denied; // @[Buffer.scala:40:9] assign buffer_auto_in_d_bits_data = buffer_nodeIn_d_bits_data; // @[Buffer.scala:40:9] assign buffer_auto_in_d_bits_corrupt = buffer_nodeIn_d_bits_corrupt; // @[Buffer.scala:40:9] assign buffer_auto_in_e_ready = buffer_nodeIn_e_ready; // @[Buffer.scala:40:9] assign buffer_nodeOut_e_valid = buffer_nodeIn_e_valid; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_e_bits_sink = buffer_nodeIn_e_bits_sink; // @[MixedNode.scala:542:17, :551:17] wire tlOtherMastersNodeIn_a_ready = tlOtherMastersNodeOut_a_ready; // @[MixedNode.scala:542:17, :551:17] wire tlOtherMastersNodeIn_a_valid; // @[MixedNode.scala:551:17] assign buffer_auto_in_a_valid = tlOtherMastersNodeOut_a_valid; // @[Buffer.scala:40:9] wire [2:0] tlOtherMastersNodeIn_a_bits_opcode; // @[MixedNode.scala:551:17] assign buffer_auto_in_a_bits_opcode = tlOtherMastersNodeOut_a_bits_opcode; // @[Buffer.scala:40:9] wire [2:0] tlOtherMastersNodeIn_a_bits_param; // @[MixedNode.scala:551:17] assign buffer_auto_in_a_bits_param = tlOtherMastersNodeOut_a_bits_param; // @[Buffer.scala:40:9] wire [3:0] tlOtherMastersNodeIn_a_bits_size; // @[MixedNode.scala:551:17] assign buffer_auto_in_a_bits_size = tlOtherMastersNodeOut_a_bits_size; // @[Buffer.scala:40:9] wire [1:0] tlOtherMastersNodeIn_a_bits_source; // @[MixedNode.scala:551:17] assign buffer_auto_in_a_bits_source = tlOtherMastersNodeOut_a_bits_source; // @[Buffer.scala:40:9] wire [31:0] tlOtherMastersNodeIn_a_bits_address; // @[MixedNode.scala:551:17] assign buffer_auto_in_a_bits_address = tlOtherMastersNodeOut_a_bits_address; // @[Buffer.scala:40:9] wire [7:0] tlOtherMastersNodeIn_a_bits_mask; // @[MixedNode.scala:551:17] assign buffer_auto_in_a_bits_mask = tlOtherMastersNodeOut_a_bits_mask; // @[Buffer.scala:40:9] wire [63:0] tlOtherMastersNodeIn_a_bits_data; // @[MixedNode.scala:551:17] assign buffer_auto_in_a_bits_data = tlOtherMastersNodeOut_a_bits_data; // @[Buffer.scala:40:9] wire tlOtherMastersNodeIn_b_ready; // @[MixedNode.scala:551:17] assign buffer_auto_in_b_ready = tlOtherMastersNodeOut_b_ready; // @[Buffer.scala:40:9] wire tlOtherMastersNodeIn_b_valid = tlOtherMastersNodeOut_b_valid; // @[MixedNode.scala:542:17, :551:17] wire [2:0] 
tlOtherMastersNodeIn_b_bits_opcode = tlOtherMastersNodeOut_b_bits_opcode; // @[MixedNode.scala:542:17, :551:17] wire [1:0] tlOtherMastersNodeIn_b_bits_param = tlOtherMastersNodeOut_b_bits_param; // @[MixedNode.scala:542:17, :551:17] wire [3:0] tlOtherMastersNodeIn_b_bits_size = tlOtherMastersNodeOut_b_bits_size; // @[MixedNode.scala:542:17, :551:17] wire [1:0] tlOtherMastersNodeIn_b_bits_source = tlOtherMastersNodeOut_b_bits_source; // @[MixedNode.scala:542:17, :551:17] wire [31:0] tlOtherMastersNodeIn_b_bits_address = tlOtherMastersNodeOut_b_bits_address; // @[MixedNode.scala:542:17, :551:17] wire [7:0] tlOtherMastersNodeIn_b_bits_mask = tlOtherMastersNodeOut_b_bits_mask; // @[MixedNode.scala:542:17, :551:17] wire [63:0] tlOtherMastersNodeIn_b_bits_data = tlOtherMastersNodeOut_b_bits_data; // @[MixedNode.scala:542:17, :551:17] wire tlOtherMastersNodeIn_b_bits_corrupt = tlOtherMastersNodeOut_b_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] wire tlOtherMastersNodeIn_c_ready = tlOtherMastersNodeOut_c_ready; // @[MixedNode.scala:542:17, :551:17] wire tlOtherMastersNodeIn_c_valid; // @[MixedNode.scala:551:17] assign buffer_auto_in_c_valid = tlOtherMastersNodeOut_c_valid; // @[Buffer.scala:40:9] wire [2:0] tlOtherMastersNodeIn_c_bits_opcode; // @[MixedNode.scala:551:17] assign buffer_auto_in_c_bits_opcode = tlOtherMastersNodeOut_c_bits_opcode; // @[Buffer.scala:40:9] wire [2:0] tlOtherMastersNodeIn_c_bits_param; // @[MixedNode.scala:551:17] assign buffer_auto_in_c_bits_param = tlOtherMastersNodeOut_c_bits_param; // @[Buffer.scala:40:9] wire [3:0] tlOtherMastersNodeIn_c_bits_size; // @[MixedNode.scala:551:17] assign buffer_auto_in_c_bits_size = tlOtherMastersNodeOut_c_bits_size; // @[Buffer.scala:40:9] wire [1:0] tlOtherMastersNodeIn_c_bits_source; // @[MixedNode.scala:551:17] assign buffer_auto_in_c_bits_source = tlOtherMastersNodeOut_c_bits_source; // @[Buffer.scala:40:9] wire [31:0] tlOtherMastersNodeIn_c_bits_address; // @[MixedNode.scala:551:17] assign buffer_auto_in_c_bits_address = tlOtherMastersNodeOut_c_bits_address; // @[Buffer.scala:40:9] wire [63:0] tlOtherMastersNodeIn_c_bits_data; // @[MixedNode.scala:551:17] assign buffer_auto_in_c_bits_data = tlOtherMastersNodeOut_c_bits_data; // @[Buffer.scala:40:9] wire tlOtherMastersNodeIn_d_ready; // @[MixedNode.scala:551:17] assign buffer_auto_in_d_ready = tlOtherMastersNodeOut_d_ready; // @[Buffer.scala:40:9] wire tlOtherMastersNodeIn_d_valid = tlOtherMastersNodeOut_d_valid; // @[MixedNode.scala:542:17, :551:17] wire [2:0] tlOtherMastersNodeIn_d_bits_opcode = tlOtherMastersNodeOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17] wire [1:0] tlOtherMastersNodeIn_d_bits_param = tlOtherMastersNodeOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17] wire [3:0] tlOtherMastersNodeIn_d_bits_size = tlOtherMastersNodeOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17] wire [1:0] tlOtherMastersNodeIn_d_bits_source = tlOtherMastersNodeOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17] wire [2:0] tlOtherMastersNodeIn_d_bits_sink = tlOtherMastersNodeOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17] wire tlOtherMastersNodeIn_d_bits_denied = tlOtherMastersNodeOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17] wire [63:0] tlOtherMastersNodeIn_d_bits_data = tlOtherMastersNodeOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17] wire tlOtherMastersNodeIn_d_bits_corrupt = tlOtherMastersNodeOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] wire tlOtherMastersNodeIn_e_ready = tlOtherMastersNodeOut_e_ready; // 
@[MixedNode.scala:542:17, :551:17] wire tlOtherMastersNodeIn_e_valid; // @[MixedNode.scala:551:17] assign buffer_auto_in_e_valid = tlOtherMastersNodeOut_e_valid; // @[Buffer.scala:40:9] wire [2:0] tlOtherMastersNodeIn_e_bits_sink; // @[MixedNode.scala:551:17] assign buffer_auto_in_e_bits_sink = tlOtherMastersNodeOut_e_bits_sink; // @[Buffer.scala:40:9] assign tlOtherMastersNodeOut_a_valid = tlOtherMastersNodeIn_a_valid; // @[MixedNode.scala:542:17, :551:17] assign tlOtherMastersNodeOut_a_bits_opcode = tlOtherMastersNodeIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign tlOtherMastersNodeOut_a_bits_param = tlOtherMastersNodeIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17] assign tlOtherMastersNodeOut_a_bits_size = tlOtherMastersNodeIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17] assign tlOtherMastersNodeOut_a_bits_source = tlOtherMastersNodeIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17] assign tlOtherMastersNodeOut_a_bits_address = tlOtherMastersNodeIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17] assign tlOtherMastersNodeOut_a_bits_mask = tlOtherMastersNodeIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17] assign tlOtherMastersNodeOut_a_bits_data = tlOtherMastersNodeIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17] assign tlOtherMastersNodeOut_b_ready = tlOtherMastersNodeIn_b_ready; // @[MixedNode.scala:542:17, :551:17] assign tlOtherMastersNodeOut_c_valid = tlOtherMastersNodeIn_c_valid; // @[MixedNode.scala:542:17, :551:17] assign tlOtherMastersNodeOut_c_bits_opcode = tlOtherMastersNodeIn_c_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign tlOtherMastersNodeOut_c_bits_param = tlOtherMastersNodeIn_c_bits_param; // @[MixedNode.scala:542:17, :551:17] assign tlOtherMastersNodeOut_c_bits_size = tlOtherMastersNodeIn_c_bits_size; // @[MixedNode.scala:542:17, :551:17] assign tlOtherMastersNodeOut_c_bits_source = tlOtherMastersNodeIn_c_bits_source; // @[MixedNode.scala:542:17, :551:17] assign tlOtherMastersNodeOut_c_bits_address = tlOtherMastersNodeIn_c_bits_address; // @[MixedNode.scala:542:17, :551:17] assign tlOtherMastersNodeOut_c_bits_data = tlOtherMastersNodeIn_c_bits_data; // @[MixedNode.scala:542:17, :551:17] assign tlOtherMastersNodeOut_d_ready = tlOtherMastersNodeIn_d_ready; // @[MixedNode.scala:542:17, :551:17] assign tlOtherMastersNodeOut_e_valid = tlOtherMastersNodeIn_e_valid; // @[MixedNode.scala:542:17, :551:17] assign tlOtherMastersNodeOut_e_bits_sink = tlOtherMastersNodeIn_e_bits_sink; // @[MixedNode.scala:542:17, :551:17] assign broadcast_auto_in = hartidOut; // @[MixedNode.scala:542:17] assign hartidOut = hartidIn; // @[MixedNode.scala:542:17, :551:17] assign auto_trace_source_out_insns_0_valid_0 = traceSourceNodeOut_insns_0_valid; // @[RocketTile.scala:141:7] assign auto_trace_source_out_insns_0_iaddr_0 = traceSourceNodeOut_insns_0_iaddr; // @[RocketTile.scala:141:7] assign auto_trace_source_out_insns_0_insn_0 = traceSourceNodeOut_insns_0_insn; // @[RocketTile.scala:141:7] assign auto_trace_source_out_insns_0_priv_0 = traceSourceNodeOut_insns_0_priv; // @[RocketTile.scala:141:7] assign auto_trace_source_out_insns_0_exception_0 = traceSourceNodeOut_insns_0_exception; // @[RocketTile.scala:141:7] assign auto_trace_source_out_insns_0_interrupt_0 = traceSourceNodeOut_insns_0_interrupt; // @[RocketTile.scala:141:7] assign auto_trace_source_out_insns_0_cause_0 = traceSourceNodeOut_insns_0_cause; // @[RocketTile.scala:141:7] assign auto_trace_source_out_insns_0_tval_0 = traceSourceNodeOut_insns_0_tval; // 
// @[RocketTile.scala:141:7]
  assign auto_trace_source_out_time_0 = traceSourceNodeOut_time; // @[RocketTile.scala:141:7]
  assign broadcast_2_auto_in_0_valid_0 = bpwatchSourceNodeOut_0_valid_0; // @[MixedNode.scala:542:17]
  assign broadcast_2_auto_in_0_action = bpwatchSourceNodeOut_0_action; // @[MixedNode.scala:542:17]
  wire int_localOut_0; // @[MixedNode.scala:542:17]
  wire x1_int_localOut_0; // @[MixedNode.scala:542:17]
  wire x1_int_localOut_1; // @[MixedNode.scala:542:17]
  wire x1_int_localOut_1_0; // @[MixedNode.scala:542:17]
  wire x1_int_localOut_2_0; // @[MixedNode.scala:542:17]
  assign int_localOut_0 = int_localIn_0; // @[MixedNode.scala:542:17, :551:17]
  assign x1_int_localOut_0 = x1_int_localIn_0; // @[MixedNode.scala:542:17, :551:17]
  assign x1_int_localOut_1 = x1_int_localIn_1; // @[MixedNode.scala:542:17, :551:17]
  assign x1_int_localOut_1_0 = x1_int_localIn_1_0; // @[MixedNode.scala:542:17, :551:17]
  assign x1_int_localOut_2_0 = x1_int_localIn_2_0; // @[MixedNode.scala:542:17, :551:17]
  wire intSinkNodeIn_0; // @[MixedNode.scala:551:17]
  wire intSinkNodeIn_1; // @[MixedNode.scala:551:17]
  wire intSinkNodeIn_2; // @[MixedNode.scala:551:17]
  wire intSinkNodeIn_3; // @[MixedNode.scala:551:17]
  wire intSinkNodeIn_4; // @[MixedNode.scala:551:17]
  assign auto_wfi_out_0_0 = wfiNodeOut_0; // @[RocketTile.scala:141:7]
  reg wfiNodeOut_0_REG; // @[Interrupts.scala:131:36]
  assign wfiNodeOut_0 = wfiNodeOut_0_REG; // @[Interrupts.scala:131:36]
  always @(posedge clock) begin // @[RocketTile.scala:141:7]
    if (reset) // @[RocketTile.scala:141:7]
      wfiNodeOut_0_REG <= 1'h0; // @[Interrupts.scala:131:36]
    else // @[RocketTile.scala:141:7]
      wfiNodeOut_0_REG <= _core_io_wfi; // @[RocketTile.scala:147:20]
  end // always @(posedge)
  TLXbar_MasterXbar_RocketTile_i2_o1_a32d64s2k3z4c_4 tlMasterXbar ( // @[HierarchicalElement.scala:55:42]
    .clock (clock),
    .reset (reset),
    .auto_anon_in_1_a_ready (widget_1_auto_anon_out_a_ready),
    .auto_anon_in_1_a_valid (widget_1_auto_anon_out_a_valid), // @[WidthWidget.scala:27:9]
    .auto_anon_in_1_a_bits_address (widget_1_auto_anon_out_a_bits_address), // @[WidthWidget.scala:27:9]
    .auto_anon_in_1_d_valid (widget_1_auto_anon_out_d_valid),
    .auto_anon_in_1_d_bits_opcode (widget_1_auto_anon_out_d_bits_opcode),
    .auto_anon_in_1_d_bits_param (widget_1_auto_anon_out_d_bits_param),
    .auto_anon_in_1_d_bits_size (widget_1_auto_anon_out_d_bits_size),
    .auto_anon_in_1_d_bits_sink (widget_1_auto_anon_out_d_bits_sink),
    .auto_anon_in_1_d_bits_denied (widget_1_auto_anon_out_d_bits_denied),
    .auto_anon_in_1_d_bits_data (widget_1_auto_anon_out_d_bits_data),
    .auto_anon_in_1_d_bits_corrupt (widget_1_auto_anon_out_d_bits_corrupt),
    .auto_anon_in_0_a_ready (widget_auto_anon_out_a_ready),
    .auto_anon_in_0_a_valid (widget_auto_anon_out_a_valid), // @[WidthWidget.scala:27:9]
    .auto_anon_in_0_a_bits_opcode (widget_auto_anon_out_a_bits_opcode), // @[WidthWidget.scala:27:9]
    .auto_anon_in_0_a_bits_param (widget_auto_anon_out_a_bits_param), // @[WidthWidget.scala:27:9]
    .auto_anon_in_0_a_bits_size (widget_auto_anon_out_a_bits_size), // @[WidthWidget.scala:27:9]
    .auto_anon_in_0_a_bits_source (widget_auto_anon_out_a_bits_source), // @[WidthWidget.scala:27:9]
    .auto_anon_in_0_a_bits_address (widget_auto_anon_out_a_bits_address), // @[WidthWidget.scala:27:9]
    .auto_anon_in_0_a_bits_mask (widget_auto_anon_out_a_bits_mask), // @[WidthWidget.scala:27:9]
    .auto_anon_in_0_a_bits_data (widget_auto_anon_out_a_bits_data), // @[WidthWidget.scala:27:9]
    .auto_anon_in_0_b_ready (widget_auto_anon_out_b_ready), // @[WidthWidget.scala:27:9]
    .auto_anon_in_0_b_valid
(widget_auto_anon_out_b_valid), .auto_anon_in_0_b_bits_opcode (widget_auto_anon_out_b_bits_opcode), .auto_anon_in_0_b_bits_param (widget_auto_anon_out_b_bits_param), .auto_anon_in_0_b_bits_size (widget_auto_anon_out_b_bits_size), .auto_anon_in_0_b_bits_source (widget_auto_anon_out_b_bits_source), .auto_anon_in_0_b_bits_address (widget_auto_anon_out_b_bits_address), .auto_anon_in_0_b_bits_mask (widget_auto_anon_out_b_bits_mask), .auto_anon_in_0_b_bits_data (widget_auto_anon_out_b_bits_data), .auto_anon_in_0_b_bits_corrupt (widget_auto_anon_out_b_bits_corrupt), .auto_anon_in_0_c_ready (widget_auto_anon_out_c_ready), .auto_anon_in_0_c_valid (widget_auto_anon_out_c_valid), // @[WidthWidget.scala:27:9] .auto_anon_in_0_c_bits_opcode (widget_auto_anon_out_c_bits_opcode), // @[WidthWidget.scala:27:9] .auto_anon_in_0_c_bits_param (widget_auto_anon_out_c_bits_param), // @[WidthWidget.scala:27:9] .auto_anon_in_0_c_bits_size (widget_auto_anon_out_c_bits_size), // @[WidthWidget.scala:27:9] .auto_anon_in_0_c_bits_source (widget_auto_anon_out_c_bits_source), // @[WidthWidget.scala:27:9] .auto_anon_in_0_c_bits_address (widget_auto_anon_out_c_bits_address), // @[WidthWidget.scala:27:9] .auto_anon_in_0_c_bits_data (widget_auto_anon_out_c_bits_data), // @[WidthWidget.scala:27:9] .auto_anon_in_0_d_ready (widget_auto_anon_out_d_ready), // @[WidthWidget.scala:27:9] .auto_anon_in_0_d_valid (widget_auto_anon_out_d_valid), .auto_anon_in_0_d_bits_opcode (widget_auto_anon_out_d_bits_opcode), .auto_anon_in_0_d_bits_param (widget_auto_anon_out_d_bits_param), .auto_anon_in_0_d_bits_size (widget_auto_anon_out_d_bits_size), .auto_anon_in_0_d_bits_source (widget_auto_anon_out_d_bits_source), .auto_anon_in_0_d_bits_sink (widget_auto_anon_out_d_bits_sink), .auto_anon_in_0_d_bits_denied (widget_auto_anon_out_d_bits_denied), .auto_anon_in_0_d_bits_data (widget_auto_anon_out_d_bits_data), .auto_anon_in_0_d_bits_corrupt (widget_auto_anon_out_d_bits_corrupt), .auto_anon_in_0_e_ready (widget_auto_anon_out_e_ready), .auto_anon_in_0_e_valid (widget_auto_anon_out_e_valid), // @[WidthWidget.scala:27:9] .auto_anon_in_0_e_bits_sink (widget_auto_anon_out_e_bits_sink), // @[WidthWidget.scala:27:9] .auto_anon_out_a_ready (tlOtherMastersNodeIn_a_ready), // @[MixedNode.scala:551:17] .auto_anon_out_a_valid (tlOtherMastersNodeIn_a_valid), .auto_anon_out_a_bits_opcode (tlOtherMastersNodeIn_a_bits_opcode), .auto_anon_out_a_bits_param (tlOtherMastersNodeIn_a_bits_param), .auto_anon_out_a_bits_size (tlOtherMastersNodeIn_a_bits_size), .auto_anon_out_a_bits_source (tlOtherMastersNodeIn_a_bits_source), .auto_anon_out_a_bits_address (tlOtherMastersNodeIn_a_bits_address), .auto_anon_out_a_bits_mask (tlOtherMastersNodeIn_a_bits_mask), .auto_anon_out_a_bits_data (tlOtherMastersNodeIn_a_bits_data), .auto_anon_out_b_ready (tlOtherMastersNodeIn_b_ready), .auto_anon_out_b_valid (tlOtherMastersNodeIn_b_valid), // @[MixedNode.scala:551:17] .auto_anon_out_b_bits_opcode (tlOtherMastersNodeIn_b_bits_opcode), // @[MixedNode.scala:551:17] .auto_anon_out_b_bits_param (tlOtherMastersNodeIn_b_bits_param), // @[MixedNode.scala:551:17] .auto_anon_out_b_bits_size (tlOtherMastersNodeIn_b_bits_size), // @[MixedNode.scala:551:17] .auto_anon_out_b_bits_source (tlOtherMastersNodeIn_b_bits_source), // @[MixedNode.scala:551:17] .auto_anon_out_b_bits_address (tlOtherMastersNodeIn_b_bits_address), // @[MixedNode.scala:551:17] .auto_anon_out_b_bits_mask (tlOtherMastersNodeIn_b_bits_mask), // @[MixedNode.scala:551:17] .auto_anon_out_b_bits_data (tlOtherMastersNodeIn_b_bits_data), 
// @[MixedNode.scala:551:17] .auto_anon_out_b_bits_corrupt (tlOtherMastersNodeIn_b_bits_corrupt), // @[MixedNode.scala:551:17] .auto_anon_out_c_ready (tlOtherMastersNodeIn_c_ready), // @[MixedNode.scala:551:17] .auto_anon_out_c_valid (tlOtherMastersNodeIn_c_valid), .auto_anon_out_c_bits_opcode (tlOtherMastersNodeIn_c_bits_opcode), .auto_anon_out_c_bits_param (tlOtherMastersNodeIn_c_bits_param), .auto_anon_out_c_bits_size (tlOtherMastersNodeIn_c_bits_size), .auto_anon_out_c_bits_source (tlOtherMastersNodeIn_c_bits_source), .auto_anon_out_c_bits_address (tlOtherMastersNodeIn_c_bits_address), .auto_anon_out_c_bits_data (tlOtherMastersNodeIn_c_bits_data), .auto_anon_out_d_ready (tlOtherMastersNodeIn_d_ready), .auto_anon_out_d_valid (tlOtherMastersNodeIn_d_valid), // @[MixedNode.scala:551:17] .auto_anon_out_d_bits_opcode (tlOtherMastersNodeIn_d_bits_opcode), // @[MixedNode.scala:551:17] .auto_anon_out_d_bits_param (tlOtherMastersNodeIn_d_bits_param), // @[MixedNode.scala:551:17] .auto_anon_out_d_bits_size (tlOtherMastersNodeIn_d_bits_size), // @[MixedNode.scala:551:17] .auto_anon_out_d_bits_source (tlOtherMastersNodeIn_d_bits_source), // @[MixedNode.scala:551:17] .auto_anon_out_d_bits_sink (tlOtherMastersNodeIn_d_bits_sink), // @[MixedNode.scala:551:17] .auto_anon_out_d_bits_denied (tlOtherMastersNodeIn_d_bits_denied), // @[MixedNode.scala:551:17] .auto_anon_out_d_bits_data (tlOtherMastersNodeIn_d_bits_data), // @[MixedNode.scala:551:17] .auto_anon_out_d_bits_corrupt (tlOtherMastersNodeIn_d_bits_corrupt), // @[MixedNode.scala:551:17] .auto_anon_out_e_ready (tlOtherMastersNodeIn_e_ready), // @[MixedNode.scala:551:17] .auto_anon_out_e_valid (tlOtherMastersNodeIn_e_valid), .auto_anon_out_e_bits_sink (tlOtherMastersNodeIn_e_bits_sink) ); // @[HierarchicalElement.scala:55:42] TLXbar_SlaveXbar_RocketTile_i0_o0_a1d8s1k1z1u_4 tlSlaveXbar ( // @[HierarchicalElement.scala:56:41] .clock (clock), .reset (reset) ); // @[HierarchicalElement.scala:56:41] IntXbar_i4_o1_4 intXbar ( // @[HierarchicalElement.scala:57:37] .auto_anon_in_3_0 (x1_int_localOut_2_0), // @[MixedNode.scala:542:17] .auto_anon_in_2_0 (x1_int_localOut_1_0), // @[MixedNode.scala:542:17] .auto_anon_in_1_0 (x1_int_localOut_0), // @[MixedNode.scala:542:17] .auto_anon_in_1_1 (x1_int_localOut_1), // @[MixedNode.scala:542:17] .auto_anon_in_0_0 (int_localOut_0), // @[MixedNode.scala:542:17] .auto_anon_out_0 (intSinkNodeIn_0), .auto_anon_out_1 (intSinkNodeIn_1), .auto_anon_out_2 (intSinkNodeIn_2), .auto_anon_out_3 (intSinkNodeIn_3), .auto_anon_out_4 (intSinkNodeIn_4) ); // @[HierarchicalElement.scala:57:37] DCache_4 dcache ( // @[HellaCache.scala:278:43] .clock (clock), .reset (reset), .auto_out_a_ready (widget_auto_anon_in_a_ready), // @[WidthWidget.scala:27:9] .auto_out_a_valid (widget_auto_anon_in_a_valid), .auto_out_a_bits_opcode (widget_auto_anon_in_a_bits_opcode), .auto_out_a_bits_param (widget_auto_anon_in_a_bits_param), .auto_out_a_bits_size (widget_auto_anon_in_a_bits_size), .auto_out_a_bits_source (widget_auto_anon_in_a_bits_source), .auto_out_a_bits_address (widget_auto_anon_in_a_bits_address), .auto_out_a_bits_mask (widget_auto_anon_in_a_bits_mask), .auto_out_a_bits_data (widget_auto_anon_in_a_bits_data), .auto_out_b_ready (widget_auto_anon_in_b_ready), .auto_out_b_valid (widget_auto_anon_in_b_valid), // @[WidthWidget.scala:27:9] .auto_out_b_bits_opcode (widget_auto_anon_in_b_bits_opcode), // @[WidthWidget.scala:27:9] .auto_out_b_bits_param (widget_auto_anon_in_b_bits_param), // @[WidthWidget.scala:27:9] .auto_out_b_bits_size 
(widget_auto_anon_in_b_bits_size), // @[WidthWidget.scala:27:9] .auto_out_b_bits_source (widget_auto_anon_in_b_bits_source), // @[WidthWidget.scala:27:9] .auto_out_b_bits_address (widget_auto_anon_in_b_bits_address), // @[WidthWidget.scala:27:9] .auto_out_b_bits_mask (widget_auto_anon_in_b_bits_mask), // @[WidthWidget.scala:27:9] .auto_out_b_bits_data (widget_auto_anon_in_b_bits_data), // @[WidthWidget.scala:27:9] .auto_out_b_bits_corrupt (widget_auto_anon_in_b_bits_corrupt), // @[WidthWidget.scala:27:9] .auto_out_c_ready (widget_auto_anon_in_c_ready), // @[WidthWidget.scala:27:9] .auto_out_c_valid (widget_auto_anon_in_c_valid), .auto_out_c_bits_opcode (widget_auto_anon_in_c_bits_opcode), .auto_out_c_bits_param (widget_auto_anon_in_c_bits_param), .auto_out_c_bits_size (widget_auto_anon_in_c_bits_size), .auto_out_c_bits_source (widget_auto_anon_in_c_bits_source), .auto_out_c_bits_address (widget_auto_anon_in_c_bits_address), .auto_out_c_bits_data (widget_auto_anon_in_c_bits_data), .auto_out_d_ready (widget_auto_anon_in_d_ready), .auto_out_d_valid (widget_auto_anon_in_d_valid), // @[WidthWidget.scala:27:9] .auto_out_d_bits_opcode (widget_auto_anon_in_d_bits_opcode), // @[WidthWidget.scala:27:9] .auto_out_d_bits_param (widget_auto_anon_in_d_bits_param), // @[WidthWidget.scala:27:9] .auto_out_d_bits_size (widget_auto_anon_in_d_bits_size), // @[WidthWidget.scala:27:9] .auto_out_d_bits_source (widget_auto_anon_in_d_bits_source), // @[WidthWidget.scala:27:9] .auto_out_d_bits_sink (widget_auto_anon_in_d_bits_sink), // @[WidthWidget.scala:27:9] .auto_out_d_bits_denied (widget_auto_anon_in_d_bits_denied), // @[WidthWidget.scala:27:9] .auto_out_d_bits_data (widget_auto_anon_in_d_bits_data), // @[WidthWidget.scala:27:9] .auto_out_d_bits_corrupt (widget_auto_anon_in_d_bits_corrupt), // @[WidthWidget.scala:27:9] .auto_out_e_ready (widget_auto_anon_in_e_ready), // @[WidthWidget.scala:27:9] .auto_out_e_valid (widget_auto_anon_in_e_valid), .auto_out_e_bits_sink (widget_auto_anon_in_e_bits_sink), .io_cpu_req_ready (_dcache_io_cpu_req_ready), .io_cpu_req_valid (_dcacheArb_io_mem_req_valid), // @[HellaCache.scala:292:25] .io_cpu_req_bits_addr (_dcacheArb_io_mem_req_bits_addr), // @[HellaCache.scala:292:25] .io_cpu_req_bits_tag (_dcacheArb_io_mem_req_bits_tag), // @[HellaCache.scala:292:25] .io_cpu_req_bits_cmd (_dcacheArb_io_mem_req_bits_cmd), // @[HellaCache.scala:292:25] .io_cpu_req_bits_size (_dcacheArb_io_mem_req_bits_size), // @[HellaCache.scala:292:25] .io_cpu_req_bits_signed (_dcacheArb_io_mem_req_bits_signed), // @[HellaCache.scala:292:25] .io_cpu_req_bits_dprv (_dcacheArb_io_mem_req_bits_dprv), // @[HellaCache.scala:292:25] .io_cpu_req_bits_dv (_dcacheArb_io_mem_req_bits_dv), // @[HellaCache.scala:292:25] .io_cpu_req_bits_phys (_dcacheArb_io_mem_req_bits_phys), // @[HellaCache.scala:292:25] .io_cpu_req_bits_no_resp (_dcacheArb_io_mem_req_bits_no_resp), // @[HellaCache.scala:292:25] .io_cpu_s1_kill (_dcacheArb_io_mem_s1_kill), // @[HellaCache.scala:292:25] .io_cpu_s1_data_data (_dcacheArb_io_mem_s1_data_data), // @[HellaCache.scala:292:25] .io_cpu_s1_data_mask (8'h0), // @[RocketTile.scala:147:20] .io_cpu_s2_nack (_dcache_io_cpu_s2_nack), .io_cpu_s2_nack_cause_raw (_dcache_io_cpu_s2_nack_cause_raw), .io_cpu_s2_uncached (_dcache_io_cpu_s2_uncached), .io_cpu_s2_paddr (_dcache_io_cpu_s2_paddr), .io_cpu_resp_valid (_dcache_io_cpu_resp_valid), .io_cpu_resp_bits_addr (_dcache_io_cpu_resp_bits_addr), .io_cpu_resp_bits_tag (_dcache_io_cpu_resp_bits_tag), .io_cpu_resp_bits_cmd (_dcache_io_cpu_resp_bits_cmd), 
.io_cpu_resp_bits_size (_dcache_io_cpu_resp_bits_size), .io_cpu_resp_bits_signed (_dcache_io_cpu_resp_bits_signed), .io_cpu_resp_bits_dprv (_dcache_io_cpu_resp_bits_dprv), .io_cpu_resp_bits_dv (_dcache_io_cpu_resp_bits_dv), .io_cpu_resp_bits_data (_dcache_io_cpu_resp_bits_data), .io_cpu_resp_bits_mask (_dcache_io_cpu_resp_bits_mask), .io_cpu_resp_bits_replay (_dcache_io_cpu_resp_bits_replay), .io_cpu_resp_bits_has_data (_dcache_io_cpu_resp_bits_has_data), .io_cpu_resp_bits_data_word_bypass (_dcache_io_cpu_resp_bits_data_word_bypass), .io_cpu_resp_bits_data_raw (_dcache_io_cpu_resp_bits_data_raw), .io_cpu_resp_bits_store_data (_dcache_io_cpu_resp_bits_store_data), .io_cpu_replay_next (_dcache_io_cpu_replay_next), .io_cpu_s2_xcpt_ma_ld (_dcache_io_cpu_s2_xcpt_ma_ld), .io_cpu_s2_xcpt_ma_st (_dcache_io_cpu_s2_xcpt_ma_st), .io_cpu_s2_xcpt_pf_ld (_dcache_io_cpu_s2_xcpt_pf_ld), .io_cpu_s2_xcpt_pf_st (_dcache_io_cpu_s2_xcpt_pf_st), .io_cpu_s2_xcpt_ae_ld (_dcache_io_cpu_s2_xcpt_ae_ld), .io_cpu_s2_xcpt_ae_st (_dcache_io_cpu_s2_xcpt_ae_st), .io_cpu_s2_gpa (_dcache_io_cpu_s2_gpa), .io_cpu_ordered (_dcache_io_cpu_ordered), .io_cpu_store_pending (_dcache_io_cpu_store_pending), .io_cpu_perf_acquire (_dcache_io_cpu_perf_acquire), .io_cpu_perf_release (_dcache_io_cpu_perf_release), .io_cpu_perf_grant (_dcache_io_cpu_perf_grant), .io_cpu_perf_tlbMiss (_dcache_io_cpu_perf_tlbMiss), .io_cpu_perf_blocked (_dcache_io_cpu_perf_blocked), .io_cpu_perf_canAcceptStoreThenLoad (_dcache_io_cpu_perf_canAcceptStoreThenLoad), .io_cpu_perf_canAcceptStoreThenRMW (_dcache_io_cpu_perf_canAcceptStoreThenRMW), .io_cpu_perf_canAcceptLoadThenLoad (_dcache_io_cpu_perf_canAcceptLoadThenLoad), .io_cpu_perf_storeBufferEmptyAfterLoad (_dcache_io_cpu_perf_storeBufferEmptyAfterLoad), .io_cpu_perf_storeBufferEmptyAfterStore (_dcache_io_cpu_perf_storeBufferEmptyAfterStore), .io_cpu_keep_clock_enabled (_dcacheArb_io_mem_keep_clock_enabled), // @[HellaCache.scala:292:25] .io_ptw_req_ready (_ptw_io_requestor_0_req_ready), // @[PTW.scala:802:19] .io_ptw_req_valid (_dcache_io_ptw_req_valid), .io_ptw_req_bits_bits_addr (_dcache_io_ptw_req_bits_bits_addr), .io_ptw_req_bits_bits_need_gpa (_dcache_io_ptw_req_bits_bits_need_gpa), .io_ptw_resp_valid (_ptw_io_requestor_0_resp_valid), // @[PTW.scala:802:19] .io_ptw_resp_bits_ae_ptw (_ptw_io_requestor_0_resp_bits_ae_ptw), // @[PTW.scala:802:19] .io_ptw_resp_bits_ae_final (_ptw_io_requestor_0_resp_bits_ae_final), // @[PTW.scala:802:19] .io_ptw_resp_bits_pf (_ptw_io_requestor_0_resp_bits_pf), // @[PTW.scala:802:19] .io_ptw_resp_bits_gf (_ptw_io_requestor_0_resp_bits_gf), // @[PTW.scala:802:19] .io_ptw_resp_bits_hr (_ptw_io_requestor_0_resp_bits_hr), // @[PTW.scala:802:19] .io_ptw_resp_bits_hw (_ptw_io_requestor_0_resp_bits_hw), // @[PTW.scala:802:19] .io_ptw_resp_bits_hx (_ptw_io_requestor_0_resp_bits_hx), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_reserved_for_future (_ptw_io_requestor_0_resp_bits_pte_reserved_for_future), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_ppn (_ptw_io_requestor_0_resp_bits_pte_ppn), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_reserved_for_software (_ptw_io_requestor_0_resp_bits_pte_reserved_for_software), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_d (_ptw_io_requestor_0_resp_bits_pte_d), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_a (_ptw_io_requestor_0_resp_bits_pte_a), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_g (_ptw_io_requestor_0_resp_bits_pte_g), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_u (_ptw_io_requestor_0_resp_bits_pte_u), // @[PTW.scala:802:19] 
.io_ptw_resp_bits_pte_x (_ptw_io_requestor_0_resp_bits_pte_x), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_w (_ptw_io_requestor_0_resp_bits_pte_w), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_r (_ptw_io_requestor_0_resp_bits_pte_r), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_v (_ptw_io_requestor_0_resp_bits_pte_v), // @[PTW.scala:802:19] .io_ptw_resp_bits_level (_ptw_io_requestor_0_resp_bits_level), // @[PTW.scala:802:19] .io_ptw_resp_bits_homogeneous (_ptw_io_requestor_0_resp_bits_homogeneous), // @[PTW.scala:802:19] .io_ptw_resp_bits_gpa_valid (_ptw_io_requestor_0_resp_bits_gpa_valid), // @[PTW.scala:802:19] .io_ptw_resp_bits_gpa_bits (_ptw_io_requestor_0_resp_bits_gpa_bits), // @[PTW.scala:802:19] .io_ptw_resp_bits_gpa_is_pte (_ptw_io_requestor_0_resp_bits_gpa_is_pte), // @[PTW.scala:802:19] .io_ptw_ptbr_mode (_ptw_io_requestor_0_ptbr_mode), // @[PTW.scala:802:19] .io_ptw_ptbr_ppn (_ptw_io_requestor_0_ptbr_ppn), // @[PTW.scala:802:19] .io_ptw_status_debug (_ptw_io_requestor_0_status_debug), // @[PTW.scala:802:19] .io_ptw_status_cease (_ptw_io_requestor_0_status_cease), // @[PTW.scala:802:19] .io_ptw_status_wfi (_ptw_io_requestor_0_status_wfi), // @[PTW.scala:802:19] .io_ptw_status_isa (_ptw_io_requestor_0_status_isa), // @[PTW.scala:802:19] .io_ptw_status_dprv (_ptw_io_requestor_0_status_dprv), // @[PTW.scala:802:19] .io_ptw_status_dv (_ptw_io_requestor_0_status_dv), // @[PTW.scala:802:19] .io_ptw_status_prv (_ptw_io_requestor_0_status_prv), // @[PTW.scala:802:19] .io_ptw_status_v (_ptw_io_requestor_0_status_v), // @[PTW.scala:802:19] .io_ptw_status_sd (_ptw_io_requestor_0_status_sd), // @[PTW.scala:802:19] .io_ptw_status_mpv (_ptw_io_requestor_0_status_mpv), // @[PTW.scala:802:19] .io_ptw_status_gva (_ptw_io_requestor_0_status_gva), // @[PTW.scala:802:19] .io_ptw_status_tsr (_ptw_io_requestor_0_status_tsr), // @[PTW.scala:802:19] .io_ptw_status_tw (_ptw_io_requestor_0_status_tw), // @[PTW.scala:802:19] .io_ptw_status_tvm (_ptw_io_requestor_0_status_tvm), // @[PTW.scala:802:19] .io_ptw_status_mxr (_ptw_io_requestor_0_status_mxr), // @[PTW.scala:802:19] .io_ptw_status_sum (_ptw_io_requestor_0_status_sum), // @[PTW.scala:802:19] .io_ptw_status_mprv (_ptw_io_requestor_0_status_mprv), // @[PTW.scala:802:19] .io_ptw_status_fs (_ptw_io_requestor_0_status_fs), // @[PTW.scala:802:19] .io_ptw_status_mpp (_ptw_io_requestor_0_status_mpp), // @[PTW.scala:802:19] .io_ptw_status_spp (_ptw_io_requestor_0_status_spp), // @[PTW.scala:802:19] .io_ptw_status_mpie (_ptw_io_requestor_0_status_mpie), // @[PTW.scala:802:19] .io_ptw_status_spie (_ptw_io_requestor_0_status_spie), // @[PTW.scala:802:19] .io_ptw_status_mie (_ptw_io_requestor_0_status_mie), // @[PTW.scala:802:19] .io_ptw_status_sie (_ptw_io_requestor_0_status_sie), // @[PTW.scala:802:19] .io_ptw_hstatus_spvp (_ptw_io_requestor_0_hstatus_spvp), // @[PTW.scala:802:19] .io_ptw_hstatus_spv (_ptw_io_requestor_0_hstatus_spv), // @[PTW.scala:802:19] .io_ptw_hstatus_gva (_ptw_io_requestor_0_hstatus_gva), // @[PTW.scala:802:19] .io_ptw_gstatus_debug (_ptw_io_requestor_0_gstatus_debug), // @[PTW.scala:802:19] .io_ptw_gstatus_cease (_ptw_io_requestor_0_gstatus_cease), // @[PTW.scala:802:19] .io_ptw_gstatus_wfi (_ptw_io_requestor_0_gstatus_wfi), // @[PTW.scala:802:19] .io_ptw_gstatus_isa (_ptw_io_requestor_0_gstatus_isa), // @[PTW.scala:802:19] .io_ptw_gstatus_dprv (_ptw_io_requestor_0_gstatus_dprv), // @[PTW.scala:802:19] .io_ptw_gstatus_dv (_ptw_io_requestor_0_gstatus_dv), // @[PTW.scala:802:19] .io_ptw_gstatus_prv (_ptw_io_requestor_0_gstatus_prv), // 
@[PTW.scala:802:19] .io_ptw_gstatus_v (_ptw_io_requestor_0_gstatus_v), // @[PTW.scala:802:19] .io_ptw_gstatus_sd (_ptw_io_requestor_0_gstatus_sd), // @[PTW.scala:802:19] .io_ptw_gstatus_zero2 (_ptw_io_requestor_0_gstatus_zero2), // @[PTW.scala:802:19] .io_ptw_gstatus_mpv (_ptw_io_requestor_0_gstatus_mpv), // @[PTW.scala:802:19] .io_ptw_gstatus_gva (_ptw_io_requestor_0_gstatus_gva), // @[PTW.scala:802:19] .io_ptw_gstatus_mbe (_ptw_io_requestor_0_gstatus_mbe), // @[PTW.scala:802:19] .io_ptw_gstatus_sbe (_ptw_io_requestor_0_gstatus_sbe), // @[PTW.scala:802:19] .io_ptw_gstatus_sxl (_ptw_io_requestor_0_gstatus_sxl), // @[PTW.scala:802:19] .io_ptw_gstatus_zero1 (_ptw_io_requestor_0_gstatus_zero1), // @[PTW.scala:802:19] .io_ptw_gstatus_tsr (_ptw_io_requestor_0_gstatus_tsr), // @[PTW.scala:802:19] .io_ptw_gstatus_tw (_ptw_io_requestor_0_gstatus_tw), // @[PTW.scala:802:19] .io_ptw_gstatus_tvm (_ptw_io_requestor_0_gstatus_tvm), // @[PTW.scala:802:19] .io_ptw_gstatus_mxr (_ptw_io_requestor_0_gstatus_mxr), // @[PTW.scala:802:19] .io_ptw_gstatus_sum (_ptw_io_requestor_0_gstatus_sum), // @[PTW.scala:802:19] .io_ptw_gstatus_mprv (_ptw_io_requestor_0_gstatus_mprv), // @[PTW.scala:802:19] .io_ptw_gstatus_fs (_ptw_io_requestor_0_gstatus_fs), // @[PTW.scala:802:19] .io_ptw_gstatus_mpp (_ptw_io_requestor_0_gstatus_mpp), // @[PTW.scala:802:19] .io_ptw_gstatus_vs (_ptw_io_requestor_0_gstatus_vs), // @[PTW.scala:802:19] .io_ptw_gstatus_spp (_ptw_io_requestor_0_gstatus_spp), // @[PTW.scala:802:19] .io_ptw_gstatus_mpie (_ptw_io_requestor_0_gstatus_mpie), // @[PTW.scala:802:19] .io_ptw_gstatus_ube (_ptw_io_requestor_0_gstatus_ube), // @[PTW.scala:802:19] .io_ptw_gstatus_spie (_ptw_io_requestor_0_gstatus_spie), // @[PTW.scala:802:19] .io_ptw_gstatus_upie (_ptw_io_requestor_0_gstatus_upie), // @[PTW.scala:802:19] .io_ptw_gstatus_mie (_ptw_io_requestor_0_gstatus_mie), // @[PTW.scala:802:19] .io_ptw_gstatus_hie (_ptw_io_requestor_0_gstatus_hie), // @[PTW.scala:802:19] .io_ptw_gstatus_sie (_ptw_io_requestor_0_gstatus_sie), // @[PTW.scala:802:19] .io_ptw_gstatus_uie (_ptw_io_requestor_0_gstatus_uie), // @[PTW.scala:802:19] .io_ptw_pmp_0_cfg_l (_ptw_io_requestor_0_pmp_0_cfg_l), // @[PTW.scala:802:19] .io_ptw_pmp_0_cfg_a (_ptw_io_requestor_0_pmp_0_cfg_a), // @[PTW.scala:802:19] .io_ptw_pmp_0_cfg_x (_ptw_io_requestor_0_pmp_0_cfg_x), // @[PTW.scala:802:19] .io_ptw_pmp_0_cfg_w (_ptw_io_requestor_0_pmp_0_cfg_w), // @[PTW.scala:802:19] .io_ptw_pmp_0_cfg_r (_ptw_io_requestor_0_pmp_0_cfg_r), // @[PTW.scala:802:19] .io_ptw_pmp_0_addr (_ptw_io_requestor_0_pmp_0_addr), // @[PTW.scala:802:19] .io_ptw_pmp_0_mask (_ptw_io_requestor_0_pmp_0_mask), // @[PTW.scala:802:19] .io_ptw_pmp_1_cfg_l (_ptw_io_requestor_0_pmp_1_cfg_l), // @[PTW.scala:802:19] .io_ptw_pmp_1_cfg_a (_ptw_io_requestor_0_pmp_1_cfg_a), // @[PTW.scala:802:19] .io_ptw_pmp_1_cfg_x (_ptw_io_requestor_0_pmp_1_cfg_x), // @[PTW.scala:802:19] .io_ptw_pmp_1_cfg_w (_ptw_io_requestor_0_pmp_1_cfg_w), // @[PTW.scala:802:19] .io_ptw_pmp_1_cfg_r (_ptw_io_requestor_0_pmp_1_cfg_r), // @[PTW.scala:802:19] .io_ptw_pmp_1_addr (_ptw_io_requestor_0_pmp_1_addr), // @[PTW.scala:802:19] .io_ptw_pmp_1_mask (_ptw_io_requestor_0_pmp_1_mask), // @[PTW.scala:802:19] .io_ptw_pmp_2_cfg_l (_ptw_io_requestor_0_pmp_2_cfg_l), // @[PTW.scala:802:19] .io_ptw_pmp_2_cfg_a (_ptw_io_requestor_0_pmp_2_cfg_a), // @[PTW.scala:802:19] .io_ptw_pmp_2_cfg_x (_ptw_io_requestor_0_pmp_2_cfg_x), // @[PTW.scala:802:19] .io_ptw_pmp_2_cfg_w (_ptw_io_requestor_0_pmp_2_cfg_w), // @[PTW.scala:802:19] .io_ptw_pmp_2_cfg_r 
(_ptw_io_requestor_0_pmp_2_cfg_r), // @[PTW.scala:802:19] .io_ptw_pmp_2_addr (_ptw_io_requestor_0_pmp_2_addr), // @[PTW.scala:802:19] .io_ptw_pmp_2_mask (_ptw_io_requestor_0_pmp_2_mask), // @[PTW.scala:802:19] .io_ptw_pmp_3_cfg_l (_ptw_io_requestor_0_pmp_3_cfg_l), // @[PTW.scala:802:19] .io_ptw_pmp_3_cfg_a (_ptw_io_requestor_0_pmp_3_cfg_a), // @[PTW.scala:802:19] .io_ptw_pmp_3_cfg_x (_ptw_io_requestor_0_pmp_3_cfg_x), // @[PTW.scala:802:19] .io_ptw_pmp_3_cfg_w (_ptw_io_requestor_0_pmp_3_cfg_w), // @[PTW.scala:802:19] .io_ptw_pmp_3_cfg_r (_ptw_io_requestor_0_pmp_3_cfg_r), // @[PTW.scala:802:19] .io_ptw_pmp_3_addr (_ptw_io_requestor_0_pmp_3_addr), // @[PTW.scala:802:19] .io_ptw_pmp_3_mask (_ptw_io_requestor_0_pmp_3_mask), // @[PTW.scala:802:19] .io_ptw_pmp_4_cfg_l (_ptw_io_requestor_0_pmp_4_cfg_l), // @[PTW.scala:802:19] .io_ptw_pmp_4_cfg_a (_ptw_io_requestor_0_pmp_4_cfg_a), // @[PTW.scala:802:19] .io_ptw_pmp_4_cfg_x (_ptw_io_requestor_0_pmp_4_cfg_x), // @[PTW.scala:802:19] .io_ptw_pmp_4_cfg_w (_ptw_io_requestor_0_pmp_4_cfg_w), // @[PTW.scala:802:19] .io_ptw_pmp_4_cfg_r (_ptw_io_requestor_0_pmp_4_cfg_r), // @[PTW.scala:802:19] .io_ptw_pmp_4_addr (_ptw_io_requestor_0_pmp_4_addr), // @[PTW.scala:802:19] .io_ptw_pmp_4_mask (_ptw_io_requestor_0_pmp_4_mask), // @[PTW.scala:802:19] .io_ptw_pmp_5_cfg_l (_ptw_io_requestor_0_pmp_5_cfg_l), // @[PTW.scala:802:19] .io_ptw_pmp_5_cfg_a (_ptw_io_requestor_0_pmp_5_cfg_a), // @[PTW.scala:802:19] .io_ptw_pmp_5_cfg_x (_ptw_io_requestor_0_pmp_5_cfg_x), // @[PTW.scala:802:19] .io_ptw_pmp_5_cfg_w (_ptw_io_requestor_0_pmp_5_cfg_w), // @[PTW.scala:802:19] .io_ptw_pmp_5_cfg_r (_ptw_io_requestor_0_pmp_5_cfg_r), // @[PTW.scala:802:19] .io_ptw_pmp_5_addr (_ptw_io_requestor_0_pmp_5_addr), // @[PTW.scala:802:19] .io_ptw_pmp_5_mask (_ptw_io_requestor_0_pmp_5_mask), // @[PTW.scala:802:19] .io_ptw_pmp_6_cfg_l (_ptw_io_requestor_0_pmp_6_cfg_l), // @[PTW.scala:802:19] .io_ptw_pmp_6_cfg_a (_ptw_io_requestor_0_pmp_6_cfg_a), // @[PTW.scala:802:19] .io_ptw_pmp_6_cfg_x (_ptw_io_requestor_0_pmp_6_cfg_x), // @[PTW.scala:802:19] .io_ptw_pmp_6_cfg_w (_ptw_io_requestor_0_pmp_6_cfg_w), // @[PTW.scala:802:19] .io_ptw_pmp_6_cfg_r (_ptw_io_requestor_0_pmp_6_cfg_r), // @[PTW.scala:802:19] .io_ptw_pmp_6_addr (_ptw_io_requestor_0_pmp_6_addr), // @[PTW.scala:802:19] .io_ptw_pmp_6_mask (_ptw_io_requestor_0_pmp_6_mask), // @[PTW.scala:802:19] .io_ptw_pmp_7_cfg_l (_ptw_io_requestor_0_pmp_7_cfg_l), // @[PTW.scala:802:19] .io_ptw_pmp_7_cfg_a (_ptw_io_requestor_0_pmp_7_cfg_a), // @[PTW.scala:802:19] .io_ptw_pmp_7_cfg_x (_ptw_io_requestor_0_pmp_7_cfg_x), // @[PTW.scala:802:19] .io_ptw_pmp_7_cfg_w (_ptw_io_requestor_0_pmp_7_cfg_w), // @[PTW.scala:802:19] .io_ptw_pmp_7_cfg_r (_ptw_io_requestor_0_pmp_7_cfg_r), // @[PTW.scala:802:19] .io_ptw_pmp_7_addr (_ptw_io_requestor_0_pmp_7_addr), // @[PTW.scala:802:19] .io_ptw_pmp_7_mask (_ptw_io_requestor_0_pmp_7_mask), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_0_ren (_ptw_io_requestor_0_customCSRs_csrs_0_ren), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_0_wen (_ptw_io_requestor_0_customCSRs_csrs_0_wen), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_0_wdata (_ptw_io_requestor_0_customCSRs_csrs_0_wdata), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_0_value (_ptw_io_requestor_0_customCSRs_csrs_0_value), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_1_ren (_ptw_io_requestor_0_customCSRs_csrs_1_ren), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_1_wen (_ptw_io_requestor_0_customCSRs_csrs_1_wen), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_1_wdata 
(_ptw_io_requestor_0_customCSRs_csrs_1_wdata), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_1_value (_ptw_io_requestor_0_customCSRs_csrs_1_value), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_2_ren (_ptw_io_requestor_0_customCSRs_csrs_2_ren), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_2_wen (_ptw_io_requestor_0_customCSRs_csrs_2_wen), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_2_wdata (_ptw_io_requestor_0_customCSRs_csrs_2_wdata), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_2_value (_ptw_io_requestor_0_customCSRs_csrs_2_value), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_3_ren (_ptw_io_requestor_0_customCSRs_csrs_3_ren), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_3_wen (_ptw_io_requestor_0_customCSRs_csrs_3_wen), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_3_wdata (_ptw_io_requestor_0_customCSRs_csrs_3_wdata), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_3_value (_ptw_io_requestor_0_customCSRs_csrs_3_value) // @[PTW.scala:802:19] ); // @[HellaCache.scala:278:43] Frontend_4 frontend ( // @[Frontend.scala:393:28] .clock (clock), .reset (reset), .auto_icache_master_out_a_ready (widget_1_auto_anon_in_a_ready), // @[WidthWidget.scala:27:9] .auto_icache_master_out_a_valid (widget_1_auto_anon_in_a_valid), .auto_icache_master_out_a_bits_address (widget_1_auto_anon_in_a_bits_address), .auto_icache_master_out_d_valid (widget_1_auto_anon_in_d_valid), // @[WidthWidget.scala:27:9] .auto_icache_master_out_d_bits_opcode (widget_1_auto_anon_in_d_bits_opcode), // @[WidthWidget.scala:27:9] .auto_icache_master_out_d_bits_param (widget_1_auto_anon_in_d_bits_param), // @[WidthWidget.scala:27:9] .auto_icache_master_out_d_bits_size (widget_1_auto_anon_in_d_bits_size), // @[WidthWidget.scala:27:9] .auto_icache_master_out_d_bits_sink (widget_1_auto_anon_in_d_bits_sink), // @[WidthWidget.scala:27:9] .auto_icache_master_out_d_bits_denied (widget_1_auto_anon_in_d_bits_denied), // @[WidthWidget.scala:27:9] .auto_icache_master_out_d_bits_data (widget_1_auto_anon_in_d_bits_data), // @[WidthWidget.scala:27:9] .auto_icache_master_out_d_bits_corrupt (widget_1_auto_anon_in_d_bits_corrupt), // @[WidthWidget.scala:27:9] .io_cpu_might_request (_core_io_imem_might_request), // @[RocketTile.scala:147:20] .io_cpu_req_valid (_core_io_imem_req_valid), // @[RocketTile.scala:147:20] .io_cpu_req_bits_pc (_core_io_imem_req_bits_pc), // @[RocketTile.scala:147:20] .io_cpu_req_bits_speculative (_core_io_imem_req_bits_speculative), // @[RocketTile.scala:147:20] .io_cpu_sfence_valid (_core_io_imem_sfence_valid), // @[RocketTile.scala:147:20] .io_cpu_sfence_bits_rs1 (_core_io_imem_sfence_bits_rs1), // @[RocketTile.scala:147:20] .io_cpu_sfence_bits_rs2 (_core_io_imem_sfence_bits_rs2), // @[RocketTile.scala:147:20] .io_cpu_sfence_bits_addr (_core_io_imem_sfence_bits_addr), // @[RocketTile.scala:147:20] .io_cpu_sfence_bits_asid (_core_io_imem_sfence_bits_asid), // @[RocketTile.scala:147:20] .io_cpu_sfence_bits_hv (_core_io_imem_sfence_bits_hv), // @[RocketTile.scala:147:20] .io_cpu_sfence_bits_hg (_core_io_imem_sfence_bits_hg), // @[RocketTile.scala:147:20] .io_cpu_resp_ready (_core_io_imem_resp_ready), // @[RocketTile.scala:147:20] .io_cpu_resp_valid (_frontend_io_cpu_resp_valid), .io_cpu_resp_bits_btb_cfiType (_frontend_io_cpu_resp_bits_btb_cfiType), .io_cpu_resp_bits_btb_taken (_frontend_io_cpu_resp_bits_btb_taken), .io_cpu_resp_bits_btb_mask (_frontend_io_cpu_resp_bits_btb_mask), .io_cpu_resp_bits_btb_bridx (_frontend_io_cpu_resp_bits_btb_bridx), .io_cpu_resp_bits_btb_target 
(_frontend_io_cpu_resp_bits_btb_target), .io_cpu_resp_bits_btb_entry (_frontend_io_cpu_resp_bits_btb_entry), .io_cpu_resp_bits_btb_bht_history (_frontend_io_cpu_resp_bits_btb_bht_history), .io_cpu_resp_bits_btb_bht_value (_frontend_io_cpu_resp_bits_btb_bht_value), .io_cpu_resp_bits_pc (_frontend_io_cpu_resp_bits_pc), .io_cpu_resp_bits_data (_frontend_io_cpu_resp_bits_data), .io_cpu_resp_bits_mask (_frontend_io_cpu_resp_bits_mask), .io_cpu_resp_bits_xcpt_pf_inst (_frontend_io_cpu_resp_bits_xcpt_pf_inst), .io_cpu_resp_bits_xcpt_gf_inst (_frontend_io_cpu_resp_bits_xcpt_gf_inst), .io_cpu_resp_bits_xcpt_ae_inst (_frontend_io_cpu_resp_bits_xcpt_ae_inst), .io_cpu_resp_bits_replay (_frontend_io_cpu_resp_bits_replay), .io_cpu_gpa_valid (_frontend_io_cpu_gpa_valid), .io_cpu_gpa_bits (_frontend_io_cpu_gpa_bits), .io_cpu_gpa_is_pte (_frontend_io_cpu_gpa_is_pte), .io_cpu_btb_update_valid (_core_io_imem_btb_update_valid), // @[RocketTile.scala:147:20] .io_cpu_btb_update_bits_prediction_cfiType (_core_io_imem_btb_update_bits_prediction_cfiType), // @[RocketTile.scala:147:20] .io_cpu_btb_update_bits_prediction_taken (_core_io_imem_btb_update_bits_prediction_taken), // @[RocketTile.scala:147:20] .io_cpu_btb_update_bits_prediction_mask (_core_io_imem_btb_update_bits_prediction_mask), // @[RocketTile.scala:147:20] .io_cpu_btb_update_bits_prediction_bridx (_core_io_imem_btb_update_bits_prediction_bridx), // @[RocketTile.scala:147:20] .io_cpu_btb_update_bits_prediction_target (_core_io_imem_btb_update_bits_prediction_target), // @[RocketTile.scala:147:20] .io_cpu_btb_update_bits_prediction_entry (_core_io_imem_btb_update_bits_prediction_entry), // @[RocketTile.scala:147:20] .io_cpu_btb_update_bits_prediction_bht_history (_core_io_imem_btb_update_bits_prediction_bht_history), // @[RocketTile.scala:147:20] .io_cpu_btb_update_bits_prediction_bht_value (_core_io_imem_btb_update_bits_prediction_bht_value), // @[RocketTile.scala:147:20] .io_cpu_btb_update_bits_pc (_core_io_imem_btb_update_bits_pc), // @[RocketTile.scala:147:20] .io_cpu_btb_update_bits_target (_core_io_imem_btb_update_bits_target), // @[RocketTile.scala:147:20] .io_cpu_btb_update_bits_isValid (_core_io_imem_btb_update_bits_isValid), // @[RocketTile.scala:147:20] .io_cpu_btb_update_bits_br_pc (_core_io_imem_btb_update_bits_br_pc), // @[RocketTile.scala:147:20] .io_cpu_btb_update_bits_cfiType (_core_io_imem_btb_update_bits_cfiType), // @[RocketTile.scala:147:20] .io_cpu_bht_update_valid (_core_io_imem_bht_update_valid), // @[RocketTile.scala:147:20] .io_cpu_bht_update_bits_prediction_history (_core_io_imem_bht_update_bits_prediction_history), // @[RocketTile.scala:147:20] .io_cpu_bht_update_bits_prediction_value (_core_io_imem_bht_update_bits_prediction_value), // @[RocketTile.scala:147:20] .io_cpu_bht_update_bits_pc (_core_io_imem_bht_update_bits_pc), // @[RocketTile.scala:147:20] .io_cpu_bht_update_bits_branch (_core_io_imem_bht_update_bits_branch), // @[RocketTile.scala:147:20] .io_cpu_bht_update_bits_taken (_core_io_imem_bht_update_bits_taken), // @[RocketTile.scala:147:20] .io_cpu_bht_update_bits_mispredict (_core_io_imem_bht_update_bits_mispredict), // @[RocketTile.scala:147:20] .io_cpu_flush_icache (_core_io_imem_flush_icache), // @[RocketTile.scala:147:20] .io_cpu_npc (_frontend_io_cpu_npc), .io_cpu_perf_acquire (_frontend_io_cpu_perf_acquire), .io_cpu_perf_tlbMiss (_frontend_io_cpu_perf_tlbMiss), .io_cpu_progress (_core_io_imem_progress), // @[RocketTile.scala:147:20] .io_ptw_req_ready (_ptw_io_requestor_1_req_ready), // @[PTW.scala:802:19] 
.io_ptw_req_valid (_frontend_io_ptw_req_valid), .io_ptw_req_bits_valid (_frontend_io_ptw_req_bits_valid), .io_ptw_req_bits_bits_addr (_frontend_io_ptw_req_bits_bits_addr), .io_ptw_req_bits_bits_need_gpa (_frontend_io_ptw_req_bits_bits_need_gpa), .io_ptw_resp_valid (_ptw_io_requestor_1_resp_valid), // @[PTW.scala:802:19] .io_ptw_resp_bits_ae_ptw (_ptw_io_requestor_1_resp_bits_ae_ptw), // @[PTW.scala:802:19] .io_ptw_resp_bits_ae_final (_ptw_io_requestor_1_resp_bits_ae_final), // @[PTW.scala:802:19] .io_ptw_resp_bits_pf (_ptw_io_requestor_1_resp_bits_pf), // @[PTW.scala:802:19] .io_ptw_resp_bits_gf (_ptw_io_requestor_1_resp_bits_gf), // @[PTW.scala:802:19] .io_ptw_resp_bits_hr (_ptw_io_requestor_1_resp_bits_hr), // @[PTW.scala:802:19] .io_ptw_resp_bits_hw (_ptw_io_requestor_1_resp_bits_hw), // @[PTW.scala:802:19] .io_ptw_resp_bits_hx (_ptw_io_requestor_1_resp_bits_hx), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_reserved_for_future (_ptw_io_requestor_1_resp_bits_pte_reserved_for_future), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_ppn (_ptw_io_requestor_1_resp_bits_pte_ppn), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_reserved_for_software (_ptw_io_requestor_1_resp_bits_pte_reserved_for_software), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_d (_ptw_io_requestor_1_resp_bits_pte_d), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_a (_ptw_io_requestor_1_resp_bits_pte_a), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_g (_ptw_io_requestor_1_resp_bits_pte_g), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_u (_ptw_io_requestor_1_resp_bits_pte_u), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_x (_ptw_io_requestor_1_resp_bits_pte_x), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_w (_ptw_io_requestor_1_resp_bits_pte_w), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_r (_ptw_io_requestor_1_resp_bits_pte_r), // @[PTW.scala:802:19] .io_ptw_resp_bits_pte_v (_ptw_io_requestor_1_resp_bits_pte_v), // @[PTW.scala:802:19] .io_ptw_resp_bits_level (_ptw_io_requestor_1_resp_bits_level), // @[PTW.scala:802:19] .io_ptw_resp_bits_homogeneous (_ptw_io_requestor_1_resp_bits_homogeneous), // @[PTW.scala:802:19] .io_ptw_resp_bits_gpa_valid (_ptw_io_requestor_1_resp_bits_gpa_valid), // @[PTW.scala:802:19] .io_ptw_resp_bits_gpa_bits (_ptw_io_requestor_1_resp_bits_gpa_bits), // @[PTW.scala:802:19] .io_ptw_resp_bits_gpa_is_pte (_ptw_io_requestor_1_resp_bits_gpa_is_pte), // @[PTW.scala:802:19] .io_ptw_ptbr_mode (_ptw_io_requestor_1_ptbr_mode), // @[PTW.scala:802:19] .io_ptw_ptbr_ppn (_ptw_io_requestor_1_ptbr_ppn), // @[PTW.scala:802:19] .io_ptw_status_debug (_ptw_io_requestor_1_status_debug), // @[PTW.scala:802:19] .io_ptw_status_cease (_ptw_io_requestor_1_status_cease), // @[PTW.scala:802:19] .io_ptw_status_wfi (_ptw_io_requestor_1_status_wfi), // @[PTW.scala:802:19] .io_ptw_status_isa (_ptw_io_requestor_1_status_isa), // @[PTW.scala:802:19] .io_ptw_status_dprv (_ptw_io_requestor_1_status_dprv), // @[PTW.scala:802:19] .io_ptw_status_dv (_ptw_io_requestor_1_status_dv), // @[PTW.scala:802:19] .io_ptw_status_prv (_ptw_io_requestor_1_status_prv), // @[PTW.scala:802:19] .io_ptw_status_v (_ptw_io_requestor_1_status_v), // @[PTW.scala:802:19] .io_ptw_status_sd (_ptw_io_requestor_1_status_sd), // @[PTW.scala:802:19] .io_ptw_status_mpv (_ptw_io_requestor_1_status_mpv), // @[PTW.scala:802:19] .io_ptw_status_gva (_ptw_io_requestor_1_status_gva), // @[PTW.scala:802:19] .io_ptw_status_tsr (_ptw_io_requestor_1_status_tsr), // @[PTW.scala:802:19] .io_ptw_status_tw (_ptw_io_requestor_1_status_tw), // @[PTW.scala:802:19] .io_ptw_status_tvm 
(_ptw_io_requestor_1_status_tvm), // @[PTW.scala:802:19] .io_ptw_status_mxr (_ptw_io_requestor_1_status_mxr), // @[PTW.scala:802:19] .io_ptw_status_sum (_ptw_io_requestor_1_status_sum), // @[PTW.scala:802:19] .io_ptw_status_mprv (_ptw_io_requestor_1_status_mprv), // @[PTW.scala:802:19] .io_ptw_status_fs (_ptw_io_requestor_1_status_fs), // @[PTW.scala:802:19] .io_ptw_status_mpp (_ptw_io_requestor_1_status_mpp), // @[PTW.scala:802:19] .io_ptw_status_spp (_ptw_io_requestor_1_status_spp), // @[PTW.scala:802:19] .io_ptw_status_mpie (_ptw_io_requestor_1_status_mpie), // @[PTW.scala:802:19] .io_ptw_status_spie (_ptw_io_requestor_1_status_spie), // @[PTW.scala:802:19] .io_ptw_status_mie (_ptw_io_requestor_1_status_mie), // @[PTW.scala:802:19] .io_ptw_status_sie (_ptw_io_requestor_1_status_sie), // @[PTW.scala:802:19] .io_ptw_hstatus_spvp (_ptw_io_requestor_1_hstatus_spvp), // @[PTW.scala:802:19] .io_ptw_hstatus_spv (_ptw_io_requestor_1_hstatus_spv), // @[PTW.scala:802:19] .io_ptw_hstatus_gva (_ptw_io_requestor_1_hstatus_gva), // @[PTW.scala:802:19] .io_ptw_gstatus_debug (_ptw_io_requestor_1_gstatus_debug), // @[PTW.scala:802:19] .io_ptw_gstatus_cease (_ptw_io_requestor_1_gstatus_cease), // @[PTW.scala:802:19] .io_ptw_gstatus_wfi (_ptw_io_requestor_1_gstatus_wfi), // @[PTW.scala:802:19] .io_ptw_gstatus_isa (_ptw_io_requestor_1_gstatus_isa), // @[PTW.scala:802:19] .io_ptw_gstatus_dprv (_ptw_io_requestor_1_gstatus_dprv), // @[PTW.scala:802:19] .io_ptw_gstatus_dv (_ptw_io_requestor_1_gstatus_dv), // @[PTW.scala:802:19] .io_ptw_gstatus_prv (_ptw_io_requestor_1_gstatus_prv), // @[PTW.scala:802:19] .io_ptw_gstatus_v (_ptw_io_requestor_1_gstatus_v), // @[PTW.scala:802:19] .io_ptw_gstatus_sd (_ptw_io_requestor_1_gstatus_sd), // @[PTW.scala:802:19] .io_ptw_gstatus_zero2 (_ptw_io_requestor_1_gstatus_zero2), // @[PTW.scala:802:19] .io_ptw_gstatus_mpv (_ptw_io_requestor_1_gstatus_mpv), // @[PTW.scala:802:19] .io_ptw_gstatus_gva (_ptw_io_requestor_1_gstatus_gva), // @[PTW.scala:802:19] .io_ptw_gstatus_mbe (_ptw_io_requestor_1_gstatus_mbe), // @[PTW.scala:802:19] .io_ptw_gstatus_sbe (_ptw_io_requestor_1_gstatus_sbe), // @[PTW.scala:802:19] .io_ptw_gstatus_sxl (_ptw_io_requestor_1_gstatus_sxl), // @[PTW.scala:802:19] .io_ptw_gstatus_zero1 (_ptw_io_requestor_1_gstatus_zero1), // @[PTW.scala:802:19] .io_ptw_gstatus_tsr (_ptw_io_requestor_1_gstatus_tsr), // @[PTW.scala:802:19] .io_ptw_gstatus_tw (_ptw_io_requestor_1_gstatus_tw), // @[PTW.scala:802:19] .io_ptw_gstatus_tvm (_ptw_io_requestor_1_gstatus_tvm), // @[PTW.scala:802:19] .io_ptw_gstatus_mxr (_ptw_io_requestor_1_gstatus_mxr), // @[PTW.scala:802:19] .io_ptw_gstatus_sum (_ptw_io_requestor_1_gstatus_sum), // @[PTW.scala:802:19] .io_ptw_gstatus_mprv (_ptw_io_requestor_1_gstatus_mprv), // @[PTW.scala:802:19] .io_ptw_gstatus_fs (_ptw_io_requestor_1_gstatus_fs), // @[PTW.scala:802:19] .io_ptw_gstatus_mpp (_ptw_io_requestor_1_gstatus_mpp), // @[PTW.scala:802:19] .io_ptw_gstatus_vs (_ptw_io_requestor_1_gstatus_vs), // @[PTW.scala:802:19] .io_ptw_gstatus_spp (_ptw_io_requestor_1_gstatus_spp), // @[PTW.scala:802:19] .io_ptw_gstatus_mpie (_ptw_io_requestor_1_gstatus_mpie), // @[PTW.scala:802:19] .io_ptw_gstatus_ube (_ptw_io_requestor_1_gstatus_ube), // @[PTW.scala:802:19] .io_ptw_gstatus_spie (_ptw_io_requestor_1_gstatus_spie), // @[PTW.scala:802:19] .io_ptw_gstatus_upie (_ptw_io_requestor_1_gstatus_upie), // @[PTW.scala:802:19] .io_ptw_gstatus_mie (_ptw_io_requestor_1_gstatus_mie), // @[PTW.scala:802:19] .io_ptw_gstatus_hie (_ptw_io_requestor_1_gstatus_hie), // 
@[PTW.scala:802:19] .io_ptw_gstatus_sie (_ptw_io_requestor_1_gstatus_sie), // @[PTW.scala:802:19] .io_ptw_gstatus_uie (_ptw_io_requestor_1_gstatus_uie), // @[PTW.scala:802:19] .io_ptw_pmp_0_cfg_l (_ptw_io_requestor_1_pmp_0_cfg_l), // @[PTW.scala:802:19] .io_ptw_pmp_0_cfg_a (_ptw_io_requestor_1_pmp_0_cfg_a), // @[PTW.scala:802:19] .io_ptw_pmp_0_cfg_x (_ptw_io_requestor_1_pmp_0_cfg_x), // @[PTW.scala:802:19] .io_ptw_pmp_0_cfg_w (_ptw_io_requestor_1_pmp_0_cfg_w), // @[PTW.scala:802:19] .io_ptw_pmp_0_cfg_r (_ptw_io_requestor_1_pmp_0_cfg_r), // @[PTW.scala:802:19] .io_ptw_pmp_0_addr (_ptw_io_requestor_1_pmp_0_addr), // @[PTW.scala:802:19] .io_ptw_pmp_0_mask (_ptw_io_requestor_1_pmp_0_mask), // @[PTW.scala:802:19] .io_ptw_pmp_1_cfg_l (_ptw_io_requestor_1_pmp_1_cfg_l), // @[PTW.scala:802:19] .io_ptw_pmp_1_cfg_a (_ptw_io_requestor_1_pmp_1_cfg_a), // @[PTW.scala:802:19] .io_ptw_pmp_1_cfg_x (_ptw_io_requestor_1_pmp_1_cfg_x), // @[PTW.scala:802:19] .io_ptw_pmp_1_cfg_w (_ptw_io_requestor_1_pmp_1_cfg_w), // @[PTW.scala:802:19] .io_ptw_pmp_1_cfg_r (_ptw_io_requestor_1_pmp_1_cfg_r), // @[PTW.scala:802:19] .io_ptw_pmp_1_addr (_ptw_io_requestor_1_pmp_1_addr), // @[PTW.scala:802:19] .io_ptw_pmp_1_mask (_ptw_io_requestor_1_pmp_1_mask), // @[PTW.scala:802:19] .io_ptw_pmp_2_cfg_l (_ptw_io_requestor_1_pmp_2_cfg_l), // @[PTW.scala:802:19] .io_ptw_pmp_2_cfg_a (_ptw_io_requestor_1_pmp_2_cfg_a), // @[PTW.scala:802:19] .io_ptw_pmp_2_cfg_x (_ptw_io_requestor_1_pmp_2_cfg_x), // @[PTW.scala:802:19] .io_ptw_pmp_2_cfg_w (_ptw_io_requestor_1_pmp_2_cfg_w), // @[PTW.scala:802:19] .io_ptw_pmp_2_cfg_r (_ptw_io_requestor_1_pmp_2_cfg_r), // @[PTW.scala:802:19] .io_ptw_pmp_2_addr (_ptw_io_requestor_1_pmp_2_addr), // @[PTW.scala:802:19] .io_ptw_pmp_2_mask (_ptw_io_requestor_1_pmp_2_mask), // @[PTW.scala:802:19] .io_ptw_pmp_3_cfg_l (_ptw_io_requestor_1_pmp_3_cfg_l), // @[PTW.scala:802:19] .io_ptw_pmp_3_cfg_a (_ptw_io_requestor_1_pmp_3_cfg_a), // @[PTW.scala:802:19] .io_ptw_pmp_3_cfg_x (_ptw_io_requestor_1_pmp_3_cfg_x), // @[PTW.scala:802:19] .io_ptw_pmp_3_cfg_w (_ptw_io_requestor_1_pmp_3_cfg_w), // @[PTW.scala:802:19] .io_ptw_pmp_3_cfg_r (_ptw_io_requestor_1_pmp_3_cfg_r), // @[PTW.scala:802:19] .io_ptw_pmp_3_addr (_ptw_io_requestor_1_pmp_3_addr), // @[PTW.scala:802:19] .io_ptw_pmp_3_mask (_ptw_io_requestor_1_pmp_3_mask), // @[PTW.scala:802:19] .io_ptw_pmp_4_cfg_l (_ptw_io_requestor_1_pmp_4_cfg_l), // @[PTW.scala:802:19] .io_ptw_pmp_4_cfg_a (_ptw_io_requestor_1_pmp_4_cfg_a), // @[PTW.scala:802:19] .io_ptw_pmp_4_cfg_x (_ptw_io_requestor_1_pmp_4_cfg_x), // @[PTW.scala:802:19] .io_ptw_pmp_4_cfg_w (_ptw_io_requestor_1_pmp_4_cfg_w), // @[PTW.scala:802:19] .io_ptw_pmp_4_cfg_r (_ptw_io_requestor_1_pmp_4_cfg_r), // @[PTW.scala:802:19] .io_ptw_pmp_4_addr (_ptw_io_requestor_1_pmp_4_addr), // @[PTW.scala:802:19] .io_ptw_pmp_4_mask (_ptw_io_requestor_1_pmp_4_mask), // @[PTW.scala:802:19] .io_ptw_pmp_5_cfg_l (_ptw_io_requestor_1_pmp_5_cfg_l), // @[PTW.scala:802:19] .io_ptw_pmp_5_cfg_a (_ptw_io_requestor_1_pmp_5_cfg_a), // @[PTW.scala:802:19] .io_ptw_pmp_5_cfg_x (_ptw_io_requestor_1_pmp_5_cfg_x), // @[PTW.scala:802:19] .io_ptw_pmp_5_cfg_w (_ptw_io_requestor_1_pmp_5_cfg_w), // @[PTW.scala:802:19] .io_ptw_pmp_5_cfg_r (_ptw_io_requestor_1_pmp_5_cfg_r), // @[PTW.scala:802:19] .io_ptw_pmp_5_addr (_ptw_io_requestor_1_pmp_5_addr), // @[PTW.scala:802:19] .io_ptw_pmp_5_mask (_ptw_io_requestor_1_pmp_5_mask), // @[PTW.scala:802:19] .io_ptw_pmp_6_cfg_l (_ptw_io_requestor_1_pmp_6_cfg_l), // @[PTW.scala:802:19] .io_ptw_pmp_6_cfg_a 
(_ptw_io_requestor_1_pmp_6_cfg_a), // @[PTW.scala:802:19] .io_ptw_pmp_6_cfg_x (_ptw_io_requestor_1_pmp_6_cfg_x), // @[PTW.scala:802:19] .io_ptw_pmp_6_cfg_w (_ptw_io_requestor_1_pmp_6_cfg_w), // @[PTW.scala:802:19] .io_ptw_pmp_6_cfg_r (_ptw_io_requestor_1_pmp_6_cfg_r), // @[PTW.scala:802:19] .io_ptw_pmp_6_addr (_ptw_io_requestor_1_pmp_6_addr), // @[PTW.scala:802:19] .io_ptw_pmp_6_mask (_ptw_io_requestor_1_pmp_6_mask), // @[PTW.scala:802:19] .io_ptw_pmp_7_cfg_l (_ptw_io_requestor_1_pmp_7_cfg_l), // @[PTW.scala:802:19] .io_ptw_pmp_7_cfg_a (_ptw_io_requestor_1_pmp_7_cfg_a), // @[PTW.scala:802:19] .io_ptw_pmp_7_cfg_x (_ptw_io_requestor_1_pmp_7_cfg_x), // @[PTW.scala:802:19] .io_ptw_pmp_7_cfg_w (_ptw_io_requestor_1_pmp_7_cfg_w), // @[PTW.scala:802:19] .io_ptw_pmp_7_cfg_r (_ptw_io_requestor_1_pmp_7_cfg_r), // @[PTW.scala:802:19] .io_ptw_pmp_7_addr (_ptw_io_requestor_1_pmp_7_addr), // @[PTW.scala:802:19] .io_ptw_pmp_7_mask (_ptw_io_requestor_1_pmp_7_mask), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_0_ren (_ptw_io_requestor_1_customCSRs_csrs_0_ren), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_0_wen (_ptw_io_requestor_1_customCSRs_csrs_0_wen), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_0_wdata (_ptw_io_requestor_1_customCSRs_csrs_0_wdata), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_0_value (_ptw_io_requestor_1_customCSRs_csrs_0_value), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_1_ren (_ptw_io_requestor_1_customCSRs_csrs_1_ren), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_1_wen (_ptw_io_requestor_1_customCSRs_csrs_1_wen), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_1_wdata (_ptw_io_requestor_1_customCSRs_csrs_1_wdata), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_1_value (_ptw_io_requestor_1_customCSRs_csrs_1_value), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_2_ren (_ptw_io_requestor_1_customCSRs_csrs_2_ren), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_2_wen (_ptw_io_requestor_1_customCSRs_csrs_2_wen), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_2_wdata (_ptw_io_requestor_1_customCSRs_csrs_2_wdata), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_2_value (_ptw_io_requestor_1_customCSRs_csrs_2_value), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_3_ren (_ptw_io_requestor_1_customCSRs_csrs_3_ren), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_3_wen (_ptw_io_requestor_1_customCSRs_csrs_3_wen), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_3_wdata (_ptw_io_requestor_1_customCSRs_csrs_3_wdata), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_3_value (_ptw_io_requestor_1_customCSRs_csrs_3_value) // @[PTW.scala:802:19] ); // @[Frontend.scala:393:28] TLFragmenter_4 fragmenter ( // @[Fragmenter.scala:345:34] .clock (clock), .reset (reset) ); // @[Fragmenter.scala:345:34] FPU_4 fpuOpt ( // @[RocketTile.scala:242:62] .clock (clock), .reset (reset), .io_hartid (_core_io_fpu_hartid), // @[RocketTile.scala:147:20] .io_time (_core_io_fpu_time), // @[RocketTile.scala:147:20] .io_inst (_core_io_fpu_inst), // @[RocketTile.scala:147:20] .io_fromint_data (_core_io_fpu_fromint_data), // @[RocketTile.scala:147:20] .io_fcsr_rm (_core_io_fpu_fcsr_rm), // @[RocketTile.scala:147:20] .io_fcsr_flags_valid (_fpuOpt_io_fcsr_flags_valid), .io_fcsr_flags_bits (_fpuOpt_io_fcsr_flags_bits), .io_store_data (_fpuOpt_io_store_data), .io_toint_data (_fpuOpt_io_toint_data), .io_ll_resp_val (_core_io_fpu_ll_resp_val), // @[RocketTile.scala:147:20] .io_ll_resp_type (_core_io_fpu_ll_resp_type), // @[RocketTile.scala:147:20] .io_ll_resp_tag (_core_io_fpu_ll_resp_tag), // @[RocketTile.scala:147:20] 
.io_ll_resp_data (_core_io_fpu_ll_resp_data), // @[RocketTile.scala:147:20] .io_valid (_core_io_fpu_valid), // @[RocketTile.scala:147:20] .io_fcsr_rdy (_fpuOpt_io_fcsr_rdy), .io_nack_mem (_fpuOpt_io_nack_mem), .io_illegal_rm (_fpuOpt_io_illegal_rm), .io_killx (_core_io_fpu_killx), // @[RocketTile.scala:147:20] .io_killm (_core_io_fpu_killm), // @[RocketTile.scala:147:20] .io_dec_ldst (_fpuOpt_io_dec_ldst), .io_dec_wen (_fpuOpt_io_dec_wen), .io_dec_ren1 (_fpuOpt_io_dec_ren1), .io_dec_ren2 (_fpuOpt_io_dec_ren2), .io_dec_ren3 (_fpuOpt_io_dec_ren3), .io_dec_swap12 (_fpuOpt_io_dec_swap12), .io_dec_swap23 (_fpuOpt_io_dec_swap23), .io_dec_typeTagIn (_fpuOpt_io_dec_typeTagIn), .io_dec_typeTagOut (_fpuOpt_io_dec_typeTagOut), .io_dec_fromint (_fpuOpt_io_dec_fromint), .io_dec_toint (_fpuOpt_io_dec_toint), .io_dec_fastpipe (_fpuOpt_io_dec_fastpipe), .io_dec_fma (_fpuOpt_io_dec_fma), .io_dec_div (_fpuOpt_io_dec_div), .io_dec_sqrt (_fpuOpt_io_dec_sqrt), .io_dec_wflags (_fpuOpt_io_dec_wflags), .io_dec_vec (_fpuOpt_io_dec_vec), .io_sboard_set (_fpuOpt_io_sboard_set), .io_sboard_clr (_fpuOpt_io_sboard_clr), .io_sboard_clra (_fpuOpt_io_sboard_clra), .io_keep_clock_enabled (_core_io_fpu_keep_clock_enabled) // @[RocketTile.scala:147:20] ); // @[RocketTile.scala:242:62] HellaCacheArbiter_4 dcacheArb ( // @[HellaCache.scala:292:25] .clock (clock), .reset (reset), .io_requestor_0_req_ready (_dcacheArb_io_requestor_0_req_ready), .io_requestor_0_req_valid (_ptw_io_mem_req_valid), // @[PTW.scala:802:19] .io_requestor_0_req_bits_addr (_ptw_io_mem_req_bits_addr), // @[PTW.scala:802:19] .io_requestor_0_req_bits_dv (_ptw_io_mem_req_bits_dv), // @[PTW.scala:802:19] .io_requestor_0_s1_kill (_ptw_io_mem_s1_kill), // @[PTW.scala:802:19] .io_requestor_0_s2_nack (_dcacheArb_io_requestor_0_s2_nack), .io_requestor_0_s2_nack_cause_raw (_dcacheArb_io_requestor_0_s2_nack_cause_raw), .io_requestor_0_s2_uncached (_dcacheArb_io_requestor_0_s2_uncached), .io_requestor_0_s2_paddr (_dcacheArb_io_requestor_0_s2_paddr), .io_requestor_0_resp_valid (_dcacheArb_io_requestor_0_resp_valid), .io_requestor_0_resp_bits_addr (_dcacheArb_io_requestor_0_resp_bits_addr), .io_requestor_0_resp_bits_tag (_dcacheArb_io_requestor_0_resp_bits_tag), .io_requestor_0_resp_bits_cmd (_dcacheArb_io_requestor_0_resp_bits_cmd), .io_requestor_0_resp_bits_size (_dcacheArb_io_requestor_0_resp_bits_size), .io_requestor_0_resp_bits_signed (_dcacheArb_io_requestor_0_resp_bits_signed), .io_requestor_0_resp_bits_dprv (_dcacheArb_io_requestor_0_resp_bits_dprv), .io_requestor_0_resp_bits_dv (_dcacheArb_io_requestor_0_resp_bits_dv), .io_requestor_0_resp_bits_data (_dcacheArb_io_requestor_0_resp_bits_data), .io_requestor_0_resp_bits_mask (_dcacheArb_io_requestor_0_resp_bits_mask), .io_requestor_0_resp_bits_replay (_dcacheArb_io_requestor_0_resp_bits_replay), .io_requestor_0_resp_bits_has_data (_dcacheArb_io_requestor_0_resp_bits_has_data), .io_requestor_0_resp_bits_data_word_bypass (_dcacheArb_io_requestor_0_resp_bits_data_word_bypass), .io_requestor_0_resp_bits_data_raw (_dcacheArb_io_requestor_0_resp_bits_data_raw), .io_requestor_0_resp_bits_store_data (_dcacheArb_io_requestor_0_resp_bits_store_data), .io_requestor_0_replay_next (_dcacheArb_io_requestor_0_replay_next), .io_requestor_0_s2_xcpt_ma_ld (_dcacheArb_io_requestor_0_s2_xcpt_ma_ld), .io_requestor_0_s2_xcpt_ma_st (_dcacheArb_io_requestor_0_s2_xcpt_ma_st), .io_requestor_0_s2_xcpt_pf_ld (_dcacheArb_io_requestor_0_s2_xcpt_pf_ld), .io_requestor_0_s2_xcpt_pf_st (_dcacheArb_io_requestor_0_s2_xcpt_pf_st), 
.io_requestor_0_s2_xcpt_ae_ld (_dcacheArb_io_requestor_0_s2_xcpt_ae_ld), .io_requestor_0_s2_xcpt_ae_st (_dcacheArb_io_requestor_0_s2_xcpt_ae_st), .io_requestor_0_s2_gpa (_dcacheArb_io_requestor_0_s2_gpa), .io_requestor_0_ordered (_dcacheArb_io_requestor_0_ordered), .io_requestor_0_store_pending (_dcacheArb_io_requestor_0_store_pending), .io_requestor_0_perf_acquire (_dcacheArb_io_requestor_0_perf_acquire), .io_requestor_0_perf_release (_dcacheArb_io_requestor_0_perf_release), .io_requestor_0_perf_grant (_dcacheArb_io_requestor_0_perf_grant), .io_requestor_0_perf_tlbMiss (_dcacheArb_io_requestor_0_perf_tlbMiss), .io_requestor_0_perf_blocked (_dcacheArb_io_requestor_0_perf_blocked), .io_requestor_0_perf_canAcceptStoreThenLoad (_dcacheArb_io_requestor_0_perf_canAcceptStoreThenLoad), .io_requestor_0_perf_canAcceptStoreThenRMW (_dcacheArb_io_requestor_0_perf_canAcceptStoreThenRMW), .io_requestor_0_perf_canAcceptLoadThenLoad (_dcacheArb_io_requestor_0_perf_canAcceptLoadThenLoad), .io_requestor_0_perf_storeBufferEmptyAfterLoad (_dcacheArb_io_requestor_0_perf_storeBufferEmptyAfterLoad), .io_requestor_0_perf_storeBufferEmptyAfterStore (_dcacheArb_io_requestor_0_perf_storeBufferEmptyAfterStore), .io_requestor_1_req_ready (_dcacheArb_io_requestor_1_req_ready), .io_requestor_1_req_valid (_core_io_dmem_req_valid), // @[RocketTile.scala:147:20] .io_requestor_1_req_bits_addr (_core_io_dmem_req_bits_addr), // @[RocketTile.scala:147:20] .io_requestor_1_req_bits_tag (_core_io_dmem_req_bits_tag), // @[RocketTile.scala:147:20] .io_requestor_1_req_bits_cmd (_core_io_dmem_req_bits_cmd), // @[RocketTile.scala:147:20] .io_requestor_1_req_bits_size (_core_io_dmem_req_bits_size), // @[RocketTile.scala:147:20] .io_requestor_1_req_bits_signed (_core_io_dmem_req_bits_signed), // @[RocketTile.scala:147:20] .io_requestor_1_req_bits_dprv (_core_io_dmem_req_bits_dprv), // @[RocketTile.scala:147:20] .io_requestor_1_req_bits_dv (_core_io_dmem_req_bits_dv), // @[RocketTile.scala:147:20] .io_requestor_1_req_bits_no_resp (_core_io_dmem_req_bits_no_resp), // @[RocketTile.scala:147:20] .io_requestor_1_s1_kill (_core_io_dmem_s1_kill), // @[RocketTile.scala:147:20] .io_requestor_1_s1_data_data (_core_io_dmem_s1_data_data), // @[RocketTile.scala:147:20] .io_requestor_1_s2_nack (_dcacheArb_io_requestor_1_s2_nack), .io_requestor_1_s2_nack_cause_raw (_dcacheArb_io_requestor_1_s2_nack_cause_raw), .io_requestor_1_s2_uncached (_dcacheArb_io_requestor_1_s2_uncached), .io_requestor_1_s2_paddr (_dcacheArb_io_requestor_1_s2_paddr), .io_requestor_1_resp_valid (_dcacheArb_io_requestor_1_resp_valid), .io_requestor_1_resp_bits_addr (_dcacheArb_io_requestor_1_resp_bits_addr), .io_requestor_1_resp_bits_tag (_dcacheArb_io_requestor_1_resp_bits_tag), .io_requestor_1_resp_bits_cmd (_dcacheArb_io_requestor_1_resp_bits_cmd), .io_requestor_1_resp_bits_size (_dcacheArb_io_requestor_1_resp_bits_size), .io_requestor_1_resp_bits_signed (_dcacheArb_io_requestor_1_resp_bits_signed), .io_requestor_1_resp_bits_dprv (_dcacheArb_io_requestor_1_resp_bits_dprv), .io_requestor_1_resp_bits_dv (_dcacheArb_io_requestor_1_resp_bits_dv), .io_requestor_1_resp_bits_data (_dcacheArb_io_requestor_1_resp_bits_data), .io_requestor_1_resp_bits_mask (_dcacheArb_io_requestor_1_resp_bits_mask), .io_requestor_1_resp_bits_replay (_dcacheArb_io_requestor_1_resp_bits_replay), .io_requestor_1_resp_bits_has_data (_dcacheArb_io_requestor_1_resp_bits_has_data), .io_requestor_1_resp_bits_data_word_bypass (_dcacheArb_io_requestor_1_resp_bits_data_word_bypass), 
.io_requestor_1_resp_bits_data_raw (_dcacheArb_io_requestor_1_resp_bits_data_raw), .io_requestor_1_resp_bits_store_data (_dcacheArb_io_requestor_1_resp_bits_store_data), .io_requestor_1_replay_next (_dcacheArb_io_requestor_1_replay_next), .io_requestor_1_s2_xcpt_ma_ld (_dcacheArb_io_requestor_1_s2_xcpt_ma_ld), .io_requestor_1_s2_xcpt_ma_st (_dcacheArb_io_requestor_1_s2_xcpt_ma_st), .io_requestor_1_s2_xcpt_pf_ld (_dcacheArb_io_requestor_1_s2_xcpt_pf_ld), .io_requestor_1_s2_xcpt_pf_st (_dcacheArb_io_requestor_1_s2_xcpt_pf_st), .io_requestor_1_s2_xcpt_ae_ld (_dcacheArb_io_requestor_1_s2_xcpt_ae_ld), .io_requestor_1_s2_xcpt_ae_st (_dcacheArb_io_requestor_1_s2_xcpt_ae_st), .io_requestor_1_s2_gpa (_dcacheArb_io_requestor_1_s2_gpa), .io_requestor_1_ordered (_dcacheArb_io_requestor_1_ordered), .io_requestor_1_store_pending (_dcacheArb_io_requestor_1_store_pending), .io_requestor_1_perf_acquire (_dcacheArb_io_requestor_1_perf_acquire), .io_requestor_1_perf_release (_dcacheArb_io_requestor_1_perf_release), .io_requestor_1_perf_grant (_dcacheArb_io_requestor_1_perf_grant), .io_requestor_1_perf_tlbMiss (_dcacheArb_io_requestor_1_perf_tlbMiss), .io_requestor_1_perf_blocked (_dcacheArb_io_requestor_1_perf_blocked), .io_requestor_1_perf_canAcceptStoreThenLoad (_dcacheArb_io_requestor_1_perf_canAcceptStoreThenLoad), .io_requestor_1_perf_canAcceptStoreThenRMW (_dcacheArb_io_requestor_1_perf_canAcceptStoreThenRMW), .io_requestor_1_perf_canAcceptLoadThenLoad (_dcacheArb_io_requestor_1_perf_canAcceptLoadThenLoad), .io_requestor_1_perf_storeBufferEmptyAfterLoad (_dcacheArb_io_requestor_1_perf_storeBufferEmptyAfterLoad), .io_requestor_1_perf_storeBufferEmptyAfterStore (_dcacheArb_io_requestor_1_perf_storeBufferEmptyAfterStore), .io_requestor_1_keep_clock_enabled (_core_io_dmem_keep_clock_enabled), // @[RocketTile.scala:147:20] .io_mem_req_ready (_dcache_io_cpu_req_ready), // @[HellaCache.scala:278:43] .io_mem_req_valid (_dcacheArb_io_mem_req_valid), .io_mem_req_bits_addr (_dcacheArb_io_mem_req_bits_addr), .io_mem_req_bits_tag (_dcacheArb_io_mem_req_bits_tag), .io_mem_req_bits_cmd (_dcacheArb_io_mem_req_bits_cmd), .io_mem_req_bits_size (_dcacheArb_io_mem_req_bits_size), .io_mem_req_bits_signed (_dcacheArb_io_mem_req_bits_signed), .io_mem_req_bits_dprv (_dcacheArb_io_mem_req_bits_dprv), .io_mem_req_bits_dv (_dcacheArb_io_mem_req_bits_dv), .io_mem_req_bits_phys (_dcacheArb_io_mem_req_bits_phys), .io_mem_req_bits_no_resp (_dcacheArb_io_mem_req_bits_no_resp), .io_mem_s1_kill (_dcacheArb_io_mem_s1_kill), .io_mem_s1_data_data (_dcacheArb_io_mem_s1_data_data), .io_mem_s2_nack (_dcache_io_cpu_s2_nack), // @[HellaCache.scala:278:43] .io_mem_s2_nack_cause_raw (_dcache_io_cpu_s2_nack_cause_raw), // @[HellaCache.scala:278:43] .io_mem_s2_uncached (_dcache_io_cpu_s2_uncached), // @[HellaCache.scala:278:43] .io_mem_s2_paddr (_dcache_io_cpu_s2_paddr), // @[HellaCache.scala:278:43] .io_mem_resp_valid (_dcache_io_cpu_resp_valid), // @[HellaCache.scala:278:43] .io_mem_resp_bits_addr (_dcache_io_cpu_resp_bits_addr), // @[HellaCache.scala:278:43] .io_mem_resp_bits_tag (_dcache_io_cpu_resp_bits_tag), // @[HellaCache.scala:278:43] .io_mem_resp_bits_cmd (_dcache_io_cpu_resp_bits_cmd), // @[HellaCache.scala:278:43] .io_mem_resp_bits_size (_dcache_io_cpu_resp_bits_size), // @[HellaCache.scala:278:43] .io_mem_resp_bits_signed (_dcache_io_cpu_resp_bits_signed), // @[HellaCache.scala:278:43] .io_mem_resp_bits_dprv (_dcache_io_cpu_resp_bits_dprv), // @[HellaCache.scala:278:43] .io_mem_resp_bits_dv (_dcache_io_cpu_resp_bits_dv), // 
@[HellaCache.scala:278:43] .io_mem_resp_bits_data (_dcache_io_cpu_resp_bits_data), // @[HellaCache.scala:278:43] .io_mem_resp_bits_mask (_dcache_io_cpu_resp_bits_mask), // @[HellaCache.scala:278:43] .io_mem_resp_bits_replay (_dcache_io_cpu_resp_bits_replay), // @[HellaCache.scala:278:43] .io_mem_resp_bits_has_data (_dcache_io_cpu_resp_bits_has_data), // @[HellaCache.scala:278:43] .io_mem_resp_bits_data_word_bypass (_dcache_io_cpu_resp_bits_data_word_bypass), // @[HellaCache.scala:278:43] .io_mem_resp_bits_data_raw (_dcache_io_cpu_resp_bits_data_raw), // @[HellaCache.scala:278:43] .io_mem_resp_bits_store_data (_dcache_io_cpu_resp_bits_store_data), // @[HellaCache.scala:278:43] .io_mem_replay_next (_dcache_io_cpu_replay_next), // @[HellaCache.scala:278:43] .io_mem_s2_xcpt_ma_ld (_dcache_io_cpu_s2_xcpt_ma_ld), // @[HellaCache.scala:278:43] .io_mem_s2_xcpt_ma_st (_dcache_io_cpu_s2_xcpt_ma_st), // @[HellaCache.scala:278:43] .io_mem_s2_xcpt_pf_ld (_dcache_io_cpu_s2_xcpt_pf_ld), // @[HellaCache.scala:278:43] .io_mem_s2_xcpt_pf_st (_dcache_io_cpu_s2_xcpt_pf_st), // @[HellaCache.scala:278:43] .io_mem_s2_xcpt_ae_ld (_dcache_io_cpu_s2_xcpt_ae_ld), // @[HellaCache.scala:278:43] .io_mem_s2_xcpt_ae_st (_dcache_io_cpu_s2_xcpt_ae_st), // @[HellaCache.scala:278:43] .io_mem_s2_gpa (_dcache_io_cpu_s2_gpa), // @[HellaCache.scala:278:43] .io_mem_ordered (_dcache_io_cpu_ordered), // @[HellaCache.scala:278:43] .io_mem_store_pending (_dcache_io_cpu_store_pending), // @[HellaCache.scala:278:43] .io_mem_perf_acquire (_dcache_io_cpu_perf_acquire), // @[HellaCache.scala:278:43] .io_mem_perf_release (_dcache_io_cpu_perf_release), // @[HellaCache.scala:278:43] .io_mem_perf_grant (_dcache_io_cpu_perf_grant), // @[HellaCache.scala:278:43] .io_mem_perf_tlbMiss (_dcache_io_cpu_perf_tlbMiss), // @[HellaCache.scala:278:43] .io_mem_perf_blocked (_dcache_io_cpu_perf_blocked), // @[HellaCache.scala:278:43] .io_mem_perf_canAcceptStoreThenLoad (_dcache_io_cpu_perf_canAcceptStoreThenLoad), // @[HellaCache.scala:278:43] .io_mem_perf_canAcceptStoreThenRMW (_dcache_io_cpu_perf_canAcceptStoreThenRMW), // @[HellaCache.scala:278:43] .io_mem_perf_canAcceptLoadThenLoad (_dcache_io_cpu_perf_canAcceptLoadThenLoad), // @[HellaCache.scala:278:43] .io_mem_perf_storeBufferEmptyAfterLoad (_dcache_io_cpu_perf_storeBufferEmptyAfterLoad), // @[HellaCache.scala:278:43] .io_mem_perf_storeBufferEmptyAfterStore (_dcache_io_cpu_perf_storeBufferEmptyAfterStore), // @[HellaCache.scala:278:43] .io_mem_keep_clock_enabled (_dcacheArb_io_mem_keep_clock_enabled) ); // @[HellaCache.scala:292:25] PTW_4 ptw ( // @[PTW.scala:802:19] .clock (clock), .reset (reset), .io_requestor_0_req_ready (_ptw_io_requestor_0_req_ready), .io_requestor_0_req_valid (_dcache_io_ptw_req_valid), // @[HellaCache.scala:278:43] .io_requestor_0_req_bits_bits_addr (_dcache_io_ptw_req_bits_bits_addr), // @[HellaCache.scala:278:43] .io_requestor_0_req_bits_bits_need_gpa (_dcache_io_ptw_req_bits_bits_need_gpa), // @[HellaCache.scala:278:43] .io_requestor_0_resp_valid (_ptw_io_requestor_0_resp_valid), .io_requestor_0_resp_bits_ae_ptw (_ptw_io_requestor_0_resp_bits_ae_ptw), .io_requestor_0_resp_bits_ae_final (_ptw_io_requestor_0_resp_bits_ae_final), .io_requestor_0_resp_bits_pf (_ptw_io_requestor_0_resp_bits_pf), .io_requestor_0_resp_bits_gf (_ptw_io_requestor_0_resp_bits_gf), .io_requestor_0_resp_bits_hr (_ptw_io_requestor_0_resp_bits_hr), .io_requestor_0_resp_bits_hw (_ptw_io_requestor_0_resp_bits_hw), .io_requestor_0_resp_bits_hx (_ptw_io_requestor_0_resp_bits_hx), 
.io_requestor_0_resp_bits_pte_reserved_for_future (_ptw_io_requestor_0_resp_bits_pte_reserved_for_future), .io_requestor_0_resp_bits_pte_ppn (_ptw_io_requestor_0_resp_bits_pte_ppn), .io_requestor_0_resp_bits_pte_reserved_for_software (_ptw_io_requestor_0_resp_bits_pte_reserved_for_software), .io_requestor_0_resp_bits_pte_d (_ptw_io_requestor_0_resp_bits_pte_d), .io_requestor_0_resp_bits_pte_a (_ptw_io_requestor_0_resp_bits_pte_a), .io_requestor_0_resp_bits_pte_g (_ptw_io_requestor_0_resp_bits_pte_g), .io_requestor_0_resp_bits_pte_u (_ptw_io_requestor_0_resp_bits_pte_u), .io_requestor_0_resp_bits_pte_x (_ptw_io_requestor_0_resp_bits_pte_x), .io_requestor_0_resp_bits_pte_w (_ptw_io_requestor_0_resp_bits_pte_w), .io_requestor_0_resp_bits_pte_r (_ptw_io_requestor_0_resp_bits_pte_r), .io_requestor_0_resp_bits_pte_v (_ptw_io_requestor_0_resp_bits_pte_v), .io_requestor_0_resp_bits_level (_ptw_io_requestor_0_resp_bits_level), .io_requestor_0_resp_bits_homogeneous (_ptw_io_requestor_0_resp_bits_homogeneous), .io_requestor_0_resp_bits_gpa_valid (_ptw_io_requestor_0_resp_bits_gpa_valid), .io_requestor_0_resp_bits_gpa_bits (_ptw_io_requestor_0_resp_bits_gpa_bits), .io_requestor_0_resp_bits_gpa_is_pte (_ptw_io_requestor_0_resp_bits_gpa_is_pte), .io_requestor_0_ptbr_mode (_ptw_io_requestor_0_ptbr_mode), .io_requestor_0_ptbr_ppn (_ptw_io_requestor_0_ptbr_ppn), .io_requestor_0_status_debug (_ptw_io_requestor_0_status_debug), .io_requestor_0_status_cease (_ptw_io_requestor_0_status_cease), .io_requestor_0_status_wfi (_ptw_io_requestor_0_status_wfi), .io_requestor_0_status_isa (_ptw_io_requestor_0_status_isa), .io_requestor_0_status_dprv (_ptw_io_requestor_0_status_dprv), .io_requestor_0_status_dv (_ptw_io_requestor_0_status_dv), .io_requestor_0_status_prv (_ptw_io_requestor_0_status_prv), .io_requestor_0_status_v (_ptw_io_requestor_0_status_v), .io_requestor_0_status_sd (_ptw_io_requestor_0_status_sd), .io_requestor_0_status_mpv (_ptw_io_requestor_0_status_mpv), .io_requestor_0_status_gva (_ptw_io_requestor_0_status_gva), .io_requestor_0_status_tsr (_ptw_io_requestor_0_status_tsr), .io_requestor_0_status_tw (_ptw_io_requestor_0_status_tw), .io_requestor_0_status_tvm (_ptw_io_requestor_0_status_tvm), .io_requestor_0_status_mxr (_ptw_io_requestor_0_status_mxr), .io_requestor_0_status_sum (_ptw_io_requestor_0_status_sum), .io_requestor_0_status_mprv (_ptw_io_requestor_0_status_mprv), .io_requestor_0_status_fs (_ptw_io_requestor_0_status_fs), .io_requestor_0_status_mpp (_ptw_io_requestor_0_status_mpp), .io_requestor_0_status_spp (_ptw_io_requestor_0_status_spp), .io_requestor_0_status_mpie (_ptw_io_requestor_0_status_mpie), .io_requestor_0_status_spie (_ptw_io_requestor_0_status_spie), .io_requestor_0_status_mie (_ptw_io_requestor_0_status_mie), .io_requestor_0_status_sie (_ptw_io_requestor_0_status_sie), .io_requestor_0_hstatus_spvp (_ptw_io_requestor_0_hstatus_spvp), .io_requestor_0_hstatus_spv (_ptw_io_requestor_0_hstatus_spv), .io_requestor_0_hstatus_gva (_ptw_io_requestor_0_hstatus_gva), .io_requestor_0_gstatus_debug (_ptw_io_requestor_0_gstatus_debug), .io_requestor_0_gstatus_cease (_ptw_io_requestor_0_gstatus_cease), .io_requestor_0_gstatus_wfi (_ptw_io_requestor_0_gstatus_wfi), .io_requestor_0_gstatus_isa (_ptw_io_requestor_0_gstatus_isa), .io_requestor_0_gstatus_dprv (_ptw_io_requestor_0_gstatus_dprv), .io_requestor_0_gstatus_dv (_ptw_io_requestor_0_gstatus_dv), .io_requestor_0_gstatus_prv (_ptw_io_requestor_0_gstatus_prv), .io_requestor_0_gstatus_v (_ptw_io_requestor_0_gstatus_v), 
.io_requestor_0_gstatus_sd (_ptw_io_requestor_0_gstatus_sd), .io_requestor_0_gstatus_zero2 (_ptw_io_requestor_0_gstatus_zero2), .io_requestor_0_gstatus_mpv (_ptw_io_requestor_0_gstatus_mpv), .io_requestor_0_gstatus_gva (_ptw_io_requestor_0_gstatus_gva), .io_requestor_0_gstatus_mbe (_ptw_io_requestor_0_gstatus_mbe), .io_requestor_0_gstatus_sbe (_ptw_io_requestor_0_gstatus_sbe), .io_requestor_0_gstatus_sxl (_ptw_io_requestor_0_gstatus_sxl), .io_requestor_0_gstatus_zero1 (_ptw_io_requestor_0_gstatus_zero1), .io_requestor_0_gstatus_tsr (_ptw_io_requestor_0_gstatus_tsr), .io_requestor_0_gstatus_tw (_ptw_io_requestor_0_gstatus_tw), .io_requestor_0_gstatus_tvm (_ptw_io_requestor_0_gstatus_tvm), .io_requestor_0_gstatus_mxr (_ptw_io_requestor_0_gstatus_mxr), .io_requestor_0_gstatus_sum (_ptw_io_requestor_0_gstatus_sum), .io_requestor_0_gstatus_mprv (_ptw_io_requestor_0_gstatus_mprv), .io_requestor_0_gstatus_fs (_ptw_io_requestor_0_gstatus_fs), .io_requestor_0_gstatus_mpp (_ptw_io_requestor_0_gstatus_mpp), .io_requestor_0_gstatus_vs (_ptw_io_requestor_0_gstatus_vs), .io_requestor_0_gstatus_spp (_ptw_io_requestor_0_gstatus_spp), .io_requestor_0_gstatus_mpie (_ptw_io_requestor_0_gstatus_mpie), .io_requestor_0_gstatus_ube (_ptw_io_requestor_0_gstatus_ube), .io_requestor_0_gstatus_spie (_ptw_io_requestor_0_gstatus_spie), .io_requestor_0_gstatus_upie (_ptw_io_requestor_0_gstatus_upie), .io_requestor_0_gstatus_mie (_ptw_io_requestor_0_gstatus_mie), .io_requestor_0_gstatus_hie (_ptw_io_requestor_0_gstatus_hie), .io_requestor_0_gstatus_sie (_ptw_io_requestor_0_gstatus_sie), .io_requestor_0_gstatus_uie (_ptw_io_requestor_0_gstatus_uie), .io_requestor_0_pmp_0_cfg_l (_ptw_io_requestor_0_pmp_0_cfg_l), .io_requestor_0_pmp_0_cfg_a (_ptw_io_requestor_0_pmp_0_cfg_a), .io_requestor_0_pmp_0_cfg_x (_ptw_io_requestor_0_pmp_0_cfg_x), .io_requestor_0_pmp_0_cfg_w (_ptw_io_requestor_0_pmp_0_cfg_w), .io_requestor_0_pmp_0_cfg_r (_ptw_io_requestor_0_pmp_0_cfg_r), .io_requestor_0_pmp_0_addr (_ptw_io_requestor_0_pmp_0_addr), .io_requestor_0_pmp_0_mask (_ptw_io_requestor_0_pmp_0_mask), .io_requestor_0_pmp_1_cfg_l (_ptw_io_requestor_0_pmp_1_cfg_l), .io_requestor_0_pmp_1_cfg_a (_ptw_io_requestor_0_pmp_1_cfg_a), .io_requestor_0_pmp_1_cfg_x (_ptw_io_requestor_0_pmp_1_cfg_x), .io_requestor_0_pmp_1_cfg_w (_ptw_io_requestor_0_pmp_1_cfg_w), .io_requestor_0_pmp_1_cfg_r (_ptw_io_requestor_0_pmp_1_cfg_r), .io_requestor_0_pmp_1_addr (_ptw_io_requestor_0_pmp_1_addr), .io_requestor_0_pmp_1_mask (_ptw_io_requestor_0_pmp_1_mask), .io_requestor_0_pmp_2_cfg_l (_ptw_io_requestor_0_pmp_2_cfg_l), .io_requestor_0_pmp_2_cfg_a (_ptw_io_requestor_0_pmp_2_cfg_a), .io_requestor_0_pmp_2_cfg_x (_ptw_io_requestor_0_pmp_2_cfg_x), .io_requestor_0_pmp_2_cfg_w (_ptw_io_requestor_0_pmp_2_cfg_w), .io_requestor_0_pmp_2_cfg_r (_ptw_io_requestor_0_pmp_2_cfg_r), .io_requestor_0_pmp_2_addr (_ptw_io_requestor_0_pmp_2_addr), .io_requestor_0_pmp_2_mask (_ptw_io_requestor_0_pmp_2_mask), .io_requestor_0_pmp_3_cfg_l (_ptw_io_requestor_0_pmp_3_cfg_l), .io_requestor_0_pmp_3_cfg_a (_ptw_io_requestor_0_pmp_3_cfg_a), .io_requestor_0_pmp_3_cfg_x (_ptw_io_requestor_0_pmp_3_cfg_x), .io_requestor_0_pmp_3_cfg_w (_ptw_io_requestor_0_pmp_3_cfg_w), .io_requestor_0_pmp_3_cfg_r (_ptw_io_requestor_0_pmp_3_cfg_r), .io_requestor_0_pmp_3_addr (_ptw_io_requestor_0_pmp_3_addr), .io_requestor_0_pmp_3_mask (_ptw_io_requestor_0_pmp_3_mask), .io_requestor_0_pmp_4_cfg_l (_ptw_io_requestor_0_pmp_4_cfg_l), .io_requestor_0_pmp_4_cfg_a (_ptw_io_requestor_0_pmp_4_cfg_a), .io_requestor_0_pmp_4_cfg_x 
(_ptw_io_requestor_0_pmp_4_cfg_x), .io_requestor_0_pmp_4_cfg_w (_ptw_io_requestor_0_pmp_4_cfg_w), .io_requestor_0_pmp_4_cfg_r (_ptw_io_requestor_0_pmp_4_cfg_r), .io_requestor_0_pmp_4_addr (_ptw_io_requestor_0_pmp_4_addr), .io_requestor_0_pmp_4_mask (_ptw_io_requestor_0_pmp_4_mask), .io_requestor_0_pmp_5_cfg_l (_ptw_io_requestor_0_pmp_5_cfg_l), .io_requestor_0_pmp_5_cfg_a (_ptw_io_requestor_0_pmp_5_cfg_a), .io_requestor_0_pmp_5_cfg_x (_ptw_io_requestor_0_pmp_5_cfg_x), .io_requestor_0_pmp_5_cfg_w (_ptw_io_requestor_0_pmp_5_cfg_w), .io_requestor_0_pmp_5_cfg_r (_ptw_io_requestor_0_pmp_5_cfg_r), .io_requestor_0_pmp_5_addr (_ptw_io_requestor_0_pmp_5_addr), .io_requestor_0_pmp_5_mask (_ptw_io_requestor_0_pmp_5_mask), .io_requestor_0_pmp_6_cfg_l (_ptw_io_requestor_0_pmp_6_cfg_l), .io_requestor_0_pmp_6_cfg_a (_ptw_io_requestor_0_pmp_6_cfg_a), .io_requestor_0_pmp_6_cfg_x (_ptw_io_requestor_0_pmp_6_cfg_x), .io_requestor_0_pmp_6_cfg_w (_ptw_io_requestor_0_pmp_6_cfg_w), .io_requestor_0_pmp_6_cfg_r (_ptw_io_requestor_0_pmp_6_cfg_r), .io_requestor_0_pmp_6_addr (_ptw_io_requestor_0_pmp_6_addr), .io_requestor_0_pmp_6_mask (_ptw_io_requestor_0_pmp_6_mask), .io_requestor_0_pmp_7_cfg_l (_ptw_io_requestor_0_pmp_7_cfg_l), .io_requestor_0_pmp_7_cfg_a (_ptw_io_requestor_0_pmp_7_cfg_a), .io_requestor_0_pmp_7_cfg_x (_ptw_io_requestor_0_pmp_7_cfg_x), .io_requestor_0_pmp_7_cfg_w (_ptw_io_requestor_0_pmp_7_cfg_w), .io_requestor_0_pmp_7_cfg_r (_ptw_io_requestor_0_pmp_7_cfg_r), .io_requestor_0_pmp_7_addr (_ptw_io_requestor_0_pmp_7_addr), .io_requestor_0_pmp_7_mask (_ptw_io_requestor_0_pmp_7_mask), .io_requestor_0_customCSRs_csrs_0_ren (_ptw_io_requestor_0_customCSRs_csrs_0_ren), .io_requestor_0_customCSRs_csrs_0_wen (_ptw_io_requestor_0_customCSRs_csrs_0_wen), .io_requestor_0_customCSRs_csrs_0_wdata (_ptw_io_requestor_0_customCSRs_csrs_0_wdata), .io_requestor_0_customCSRs_csrs_0_value (_ptw_io_requestor_0_customCSRs_csrs_0_value), .io_requestor_0_customCSRs_csrs_1_ren (_ptw_io_requestor_0_customCSRs_csrs_1_ren), .io_requestor_0_customCSRs_csrs_1_wen (_ptw_io_requestor_0_customCSRs_csrs_1_wen), .io_requestor_0_customCSRs_csrs_1_wdata (_ptw_io_requestor_0_customCSRs_csrs_1_wdata), .io_requestor_0_customCSRs_csrs_1_value (_ptw_io_requestor_0_customCSRs_csrs_1_value), .io_requestor_0_customCSRs_csrs_2_ren (_ptw_io_requestor_0_customCSRs_csrs_2_ren), .io_requestor_0_customCSRs_csrs_2_wen (_ptw_io_requestor_0_customCSRs_csrs_2_wen), .io_requestor_0_customCSRs_csrs_2_wdata (_ptw_io_requestor_0_customCSRs_csrs_2_wdata), .io_requestor_0_customCSRs_csrs_2_value (_ptw_io_requestor_0_customCSRs_csrs_2_value), .io_requestor_0_customCSRs_csrs_3_ren (_ptw_io_requestor_0_customCSRs_csrs_3_ren), .io_requestor_0_customCSRs_csrs_3_wen (_ptw_io_requestor_0_customCSRs_csrs_3_wen), .io_requestor_0_customCSRs_csrs_3_wdata (_ptw_io_requestor_0_customCSRs_csrs_3_wdata), .io_requestor_0_customCSRs_csrs_3_value (_ptw_io_requestor_0_customCSRs_csrs_3_value), .io_requestor_1_req_ready (_ptw_io_requestor_1_req_ready), .io_requestor_1_req_valid (_frontend_io_ptw_req_valid), // @[Frontend.scala:393:28] .io_requestor_1_req_bits_valid (_frontend_io_ptw_req_bits_valid), // @[Frontend.scala:393:28] .io_requestor_1_req_bits_bits_addr (_frontend_io_ptw_req_bits_bits_addr), // @[Frontend.scala:393:28] .io_requestor_1_req_bits_bits_need_gpa (_frontend_io_ptw_req_bits_bits_need_gpa), // @[Frontend.scala:393:28] .io_requestor_1_resp_valid (_ptw_io_requestor_1_resp_valid), .io_requestor_1_resp_bits_ae_ptw (_ptw_io_requestor_1_resp_bits_ae_ptw), 
.io_requestor_1_resp_bits_ae_final (_ptw_io_requestor_1_resp_bits_ae_final), .io_requestor_1_resp_bits_pf (_ptw_io_requestor_1_resp_bits_pf), .io_requestor_1_resp_bits_gf (_ptw_io_requestor_1_resp_bits_gf), .io_requestor_1_resp_bits_hr (_ptw_io_requestor_1_resp_bits_hr), .io_requestor_1_resp_bits_hw (_ptw_io_requestor_1_resp_bits_hw), .io_requestor_1_resp_bits_hx (_ptw_io_requestor_1_resp_bits_hx), .io_requestor_1_resp_bits_pte_reserved_for_future (_ptw_io_requestor_1_resp_bits_pte_reserved_for_future), .io_requestor_1_resp_bits_pte_ppn (_ptw_io_requestor_1_resp_bits_pte_ppn), .io_requestor_1_resp_bits_pte_reserved_for_software (_ptw_io_requestor_1_resp_bits_pte_reserved_for_software), .io_requestor_1_resp_bits_pte_d (_ptw_io_requestor_1_resp_bits_pte_d), .io_requestor_1_resp_bits_pte_a (_ptw_io_requestor_1_resp_bits_pte_a), .io_requestor_1_resp_bits_pte_g (_ptw_io_requestor_1_resp_bits_pte_g), .io_requestor_1_resp_bits_pte_u (_ptw_io_requestor_1_resp_bits_pte_u), .io_requestor_1_resp_bits_pte_x (_ptw_io_requestor_1_resp_bits_pte_x), .io_requestor_1_resp_bits_pte_w (_ptw_io_requestor_1_resp_bits_pte_w), .io_requestor_1_resp_bits_pte_r (_ptw_io_requestor_1_resp_bits_pte_r), .io_requestor_1_resp_bits_pte_v (_ptw_io_requestor_1_resp_bits_pte_v), .io_requestor_1_resp_bits_level (_ptw_io_requestor_1_resp_bits_level), .io_requestor_1_resp_bits_homogeneous (_ptw_io_requestor_1_resp_bits_homogeneous), .io_requestor_1_resp_bits_gpa_valid (_ptw_io_requestor_1_resp_bits_gpa_valid), .io_requestor_1_resp_bits_gpa_bits (_ptw_io_requestor_1_resp_bits_gpa_bits), .io_requestor_1_resp_bits_gpa_is_pte (_ptw_io_requestor_1_resp_bits_gpa_is_pte), .io_requestor_1_ptbr_mode (_ptw_io_requestor_1_ptbr_mode), .io_requestor_1_ptbr_ppn (_ptw_io_requestor_1_ptbr_ppn), .io_requestor_1_status_debug (_ptw_io_requestor_1_status_debug), .io_requestor_1_status_cease (_ptw_io_requestor_1_status_cease), .io_requestor_1_status_wfi (_ptw_io_requestor_1_status_wfi), .io_requestor_1_status_isa (_ptw_io_requestor_1_status_isa), .io_requestor_1_status_dprv (_ptw_io_requestor_1_status_dprv), .io_requestor_1_status_dv (_ptw_io_requestor_1_status_dv), .io_requestor_1_status_prv (_ptw_io_requestor_1_status_prv), .io_requestor_1_status_v (_ptw_io_requestor_1_status_v), .io_requestor_1_status_sd (_ptw_io_requestor_1_status_sd), .io_requestor_1_status_mpv (_ptw_io_requestor_1_status_mpv), .io_requestor_1_status_gva (_ptw_io_requestor_1_status_gva), .io_requestor_1_status_tsr (_ptw_io_requestor_1_status_tsr), .io_requestor_1_status_tw (_ptw_io_requestor_1_status_tw), .io_requestor_1_status_tvm (_ptw_io_requestor_1_status_tvm), .io_requestor_1_status_mxr (_ptw_io_requestor_1_status_mxr), .io_requestor_1_status_sum (_ptw_io_requestor_1_status_sum), .io_requestor_1_status_mprv (_ptw_io_requestor_1_status_mprv), .io_requestor_1_status_fs (_ptw_io_requestor_1_status_fs), .io_requestor_1_status_mpp (_ptw_io_requestor_1_status_mpp), .io_requestor_1_status_spp (_ptw_io_requestor_1_status_spp), .io_requestor_1_status_mpie (_ptw_io_requestor_1_status_mpie), .io_requestor_1_status_spie (_ptw_io_requestor_1_status_spie), .io_requestor_1_status_mie (_ptw_io_requestor_1_status_mie), .io_requestor_1_status_sie (_ptw_io_requestor_1_status_sie), .io_requestor_1_hstatus_spvp (_ptw_io_requestor_1_hstatus_spvp), .io_requestor_1_hstatus_spv (_ptw_io_requestor_1_hstatus_spv), .io_requestor_1_hstatus_gva (_ptw_io_requestor_1_hstatus_gva), .io_requestor_1_gstatus_debug (_ptw_io_requestor_1_gstatus_debug), .io_requestor_1_gstatus_cease 
(_ptw_io_requestor_1_gstatus_cease), .io_requestor_1_gstatus_wfi (_ptw_io_requestor_1_gstatus_wfi), .io_requestor_1_gstatus_isa (_ptw_io_requestor_1_gstatus_isa), .io_requestor_1_gstatus_dprv (_ptw_io_requestor_1_gstatus_dprv), .io_requestor_1_gstatus_dv (_ptw_io_requestor_1_gstatus_dv), .io_requestor_1_gstatus_prv (_ptw_io_requestor_1_gstatus_prv), .io_requestor_1_gstatus_v (_ptw_io_requestor_1_gstatus_v), .io_requestor_1_gstatus_sd (_ptw_io_requestor_1_gstatus_sd), .io_requestor_1_gstatus_zero2 (_ptw_io_requestor_1_gstatus_zero2), .io_requestor_1_gstatus_mpv (_ptw_io_requestor_1_gstatus_mpv), .io_requestor_1_gstatus_gva (_ptw_io_requestor_1_gstatus_gva), .io_requestor_1_gstatus_mbe (_ptw_io_requestor_1_gstatus_mbe), .io_requestor_1_gstatus_sbe (_ptw_io_requestor_1_gstatus_sbe), .io_requestor_1_gstatus_sxl (_ptw_io_requestor_1_gstatus_sxl), .io_requestor_1_gstatus_zero1 (_ptw_io_requestor_1_gstatus_zero1), .io_requestor_1_gstatus_tsr (_ptw_io_requestor_1_gstatus_tsr), .io_requestor_1_gstatus_tw (_ptw_io_requestor_1_gstatus_tw), .io_requestor_1_gstatus_tvm (_ptw_io_requestor_1_gstatus_tvm), .io_requestor_1_gstatus_mxr (_ptw_io_requestor_1_gstatus_mxr), .io_requestor_1_gstatus_sum (_ptw_io_requestor_1_gstatus_sum), .io_requestor_1_gstatus_mprv (_ptw_io_requestor_1_gstatus_mprv), .io_requestor_1_gstatus_fs (_ptw_io_requestor_1_gstatus_fs), .io_requestor_1_gstatus_mpp (_ptw_io_requestor_1_gstatus_mpp), .io_requestor_1_gstatus_vs (_ptw_io_requestor_1_gstatus_vs), .io_requestor_1_gstatus_spp (_ptw_io_requestor_1_gstatus_spp), .io_requestor_1_gstatus_mpie (_ptw_io_requestor_1_gstatus_mpie), .io_requestor_1_gstatus_ube (_ptw_io_requestor_1_gstatus_ube), .io_requestor_1_gstatus_spie (_ptw_io_requestor_1_gstatus_spie), .io_requestor_1_gstatus_upie (_ptw_io_requestor_1_gstatus_upie), .io_requestor_1_gstatus_mie (_ptw_io_requestor_1_gstatus_mie), .io_requestor_1_gstatus_hie (_ptw_io_requestor_1_gstatus_hie), .io_requestor_1_gstatus_sie (_ptw_io_requestor_1_gstatus_sie), .io_requestor_1_gstatus_uie (_ptw_io_requestor_1_gstatus_uie), .io_requestor_1_pmp_0_cfg_l (_ptw_io_requestor_1_pmp_0_cfg_l), .io_requestor_1_pmp_0_cfg_a (_ptw_io_requestor_1_pmp_0_cfg_a), .io_requestor_1_pmp_0_cfg_x (_ptw_io_requestor_1_pmp_0_cfg_x), .io_requestor_1_pmp_0_cfg_w (_ptw_io_requestor_1_pmp_0_cfg_w), .io_requestor_1_pmp_0_cfg_r (_ptw_io_requestor_1_pmp_0_cfg_r), .io_requestor_1_pmp_0_addr (_ptw_io_requestor_1_pmp_0_addr), .io_requestor_1_pmp_0_mask (_ptw_io_requestor_1_pmp_0_mask), .io_requestor_1_pmp_1_cfg_l (_ptw_io_requestor_1_pmp_1_cfg_l), .io_requestor_1_pmp_1_cfg_a (_ptw_io_requestor_1_pmp_1_cfg_a), .io_requestor_1_pmp_1_cfg_x (_ptw_io_requestor_1_pmp_1_cfg_x), .io_requestor_1_pmp_1_cfg_w (_ptw_io_requestor_1_pmp_1_cfg_w), .io_requestor_1_pmp_1_cfg_r (_ptw_io_requestor_1_pmp_1_cfg_r), .io_requestor_1_pmp_1_addr (_ptw_io_requestor_1_pmp_1_addr), .io_requestor_1_pmp_1_mask (_ptw_io_requestor_1_pmp_1_mask), .io_requestor_1_pmp_2_cfg_l (_ptw_io_requestor_1_pmp_2_cfg_l), .io_requestor_1_pmp_2_cfg_a (_ptw_io_requestor_1_pmp_2_cfg_a), .io_requestor_1_pmp_2_cfg_x (_ptw_io_requestor_1_pmp_2_cfg_x), .io_requestor_1_pmp_2_cfg_w (_ptw_io_requestor_1_pmp_2_cfg_w), .io_requestor_1_pmp_2_cfg_r (_ptw_io_requestor_1_pmp_2_cfg_r), .io_requestor_1_pmp_2_addr (_ptw_io_requestor_1_pmp_2_addr), .io_requestor_1_pmp_2_mask (_ptw_io_requestor_1_pmp_2_mask), .io_requestor_1_pmp_3_cfg_l (_ptw_io_requestor_1_pmp_3_cfg_l), .io_requestor_1_pmp_3_cfg_a (_ptw_io_requestor_1_pmp_3_cfg_a), .io_requestor_1_pmp_3_cfg_x 
(_ptw_io_requestor_1_pmp_3_cfg_x), .io_requestor_1_pmp_3_cfg_w (_ptw_io_requestor_1_pmp_3_cfg_w), .io_requestor_1_pmp_3_cfg_r (_ptw_io_requestor_1_pmp_3_cfg_r), .io_requestor_1_pmp_3_addr (_ptw_io_requestor_1_pmp_3_addr), .io_requestor_1_pmp_3_mask (_ptw_io_requestor_1_pmp_3_mask), .io_requestor_1_pmp_4_cfg_l (_ptw_io_requestor_1_pmp_4_cfg_l), .io_requestor_1_pmp_4_cfg_a (_ptw_io_requestor_1_pmp_4_cfg_a), .io_requestor_1_pmp_4_cfg_x (_ptw_io_requestor_1_pmp_4_cfg_x), .io_requestor_1_pmp_4_cfg_w (_ptw_io_requestor_1_pmp_4_cfg_w), .io_requestor_1_pmp_4_cfg_r (_ptw_io_requestor_1_pmp_4_cfg_r), .io_requestor_1_pmp_4_addr (_ptw_io_requestor_1_pmp_4_addr), .io_requestor_1_pmp_4_mask (_ptw_io_requestor_1_pmp_4_mask), .io_requestor_1_pmp_5_cfg_l (_ptw_io_requestor_1_pmp_5_cfg_l), .io_requestor_1_pmp_5_cfg_a (_ptw_io_requestor_1_pmp_5_cfg_a), .io_requestor_1_pmp_5_cfg_x (_ptw_io_requestor_1_pmp_5_cfg_x), .io_requestor_1_pmp_5_cfg_w (_ptw_io_requestor_1_pmp_5_cfg_w), .io_requestor_1_pmp_5_cfg_r (_ptw_io_requestor_1_pmp_5_cfg_r), .io_requestor_1_pmp_5_addr (_ptw_io_requestor_1_pmp_5_addr), .io_requestor_1_pmp_5_mask (_ptw_io_requestor_1_pmp_5_mask), .io_requestor_1_pmp_6_cfg_l (_ptw_io_requestor_1_pmp_6_cfg_l), .io_requestor_1_pmp_6_cfg_a (_ptw_io_requestor_1_pmp_6_cfg_a), .io_requestor_1_pmp_6_cfg_x (_ptw_io_requestor_1_pmp_6_cfg_x), .io_requestor_1_pmp_6_cfg_w (_ptw_io_requestor_1_pmp_6_cfg_w), .io_requestor_1_pmp_6_cfg_r (_ptw_io_requestor_1_pmp_6_cfg_r), .io_requestor_1_pmp_6_addr (_ptw_io_requestor_1_pmp_6_addr), .io_requestor_1_pmp_6_mask (_ptw_io_requestor_1_pmp_6_mask), .io_requestor_1_pmp_7_cfg_l (_ptw_io_requestor_1_pmp_7_cfg_l), .io_requestor_1_pmp_7_cfg_a (_ptw_io_requestor_1_pmp_7_cfg_a), .io_requestor_1_pmp_7_cfg_x (_ptw_io_requestor_1_pmp_7_cfg_x), .io_requestor_1_pmp_7_cfg_w (_ptw_io_requestor_1_pmp_7_cfg_w), .io_requestor_1_pmp_7_cfg_r (_ptw_io_requestor_1_pmp_7_cfg_r), .io_requestor_1_pmp_7_addr (_ptw_io_requestor_1_pmp_7_addr), .io_requestor_1_pmp_7_mask (_ptw_io_requestor_1_pmp_7_mask), .io_requestor_1_customCSRs_csrs_0_ren (_ptw_io_requestor_1_customCSRs_csrs_0_ren), .io_requestor_1_customCSRs_csrs_0_wen (_ptw_io_requestor_1_customCSRs_csrs_0_wen), .io_requestor_1_customCSRs_csrs_0_wdata (_ptw_io_requestor_1_customCSRs_csrs_0_wdata), .io_requestor_1_customCSRs_csrs_0_value (_ptw_io_requestor_1_customCSRs_csrs_0_value), .io_requestor_1_customCSRs_csrs_1_ren (_ptw_io_requestor_1_customCSRs_csrs_1_ren), .io_requestor_1_customCSRs_csrs_1_wen (_ptw_io_requestor_1_customCSRs_csrs_1_wen), .io_requestor_1_customCSRs_csrs_1_wdata (_ptw_io_requestor_1_customCSRs_csrs_1_wdata), .io_requestor_1_customCSRs_csrs_1_value (_ptw_io_requestor_1_customCSRs_csrs_1_value), .io_requestor_1_customCSRs_csrs_2_ren (_ptw_io_requestor_1_customCSRs_csrs_2_ren), .io_requestor_1_customCSRs_csrs_2_wen (_ptw_io_requestor_1_customCSRs_csrs_2_wen), .io_requestor_1_customCSRs_csrs_2_wdata (_ptw_io_requestor_1_customCSRs_csrs_2_wdata), .io_requestor_1_customCSRs_csrs_2_value (_ptw_io_requestor_1_customCSRs_csrs_2_value), .io_requestor_1_customCSRs_csrs_3_ren (_ptw_io_requestor_1_customCSRs_csrs_3_ren), .io_requestor_1_customCSRs_csrs_3_wen (_ptw_io_requestor_1_customCSRs_csrs_3_wen), .io_requestor_1_customCSRs_csrs_3_wdata (_ptw_io_requestor_1_customCSRs_csrs_3_wdata), .io_requestor_1_customCSRs_csrs_3_value (_ptw_io_requestor_1_customCSRs_csrs_3_value), .io_mem_req_ready (_dcacheArb_io_requestor_0_req_ready), // @[HellaCache.scala:292:25] .io_mem_req_valid (_ptw_io_mem_req_valid), .io_mem_req_bits_addr 
(_ptw_io_mem_req_bits_addr), .io_mem_req_bits_dv (_ptw_io_mem_req_bits_dv), .io_mem_s1_kill (_ptw_io_mem_s1_kill), .io_mem_s2_nack (_dcacheArb_io_requestor_0_s2_nack), // @[HellaCache.scala:292:25] .io_mem_s2_nack_cause_raw (_dcacheArb_io_requestor_0_s2_nack_cause_raw), // @[HellaCache.scala:292:25] .io_mem_s2_uncached (_dcacheArb_io_requestor_0_s2_uncached), // @[HellaCache.scala:292:25] .io_mem_s2_paddr (_dcacheArb_io_requestor_0_s2_paddr), // @[HellaCache.scala:292:25] .io_mem_resp_valid (_dcacheArb_io_requestor_0_resp_valid), // @[HellaCache.scala:292:25] .io_mem_resp_bits_addr (_dcacheArb_io_requestor_0_resp_bits_addr), // @[HellaCache.scala:292:25] .io_mem_resp_bits_tag (_dcacheArb_io_requestor_0_resp_bits_tag), // @[HellaCache.scala:292:25] .io_mem_resp_bits_cmd (_dcacheArb_io_requestor_0_resp_bits_cmd), // @[HellaCache.scala:292:25] .io_mem_resp_bits_size (_dcacheArb_io_requestor_0_resp_bits_size), // @[HellaCache.scala:292:25] .io_mem_resp_bits_signed (_dcacheArb_io_requestor_0_resp_bits_signed), // @[HellaCache.scala:292:25] .io_mem_resp_bits_dprv (_dcacheArb_io_requestor_0_resp_bits_dprv), // @[HellaCache.scala:292:25] .io_mem_resp_bits_dv (_dcacheArb_io_requestor_0_resp_bits_dv), // @[HellaCache.scala:292:25] .io_mem_resp_bits_data (_dcacheArb_io_requestor_0_resp_bits_data), // @[HellaCache.scala:292:25] .io_mem_resp_bits_mask (_dcacheArb_io_requestor_0_resp_bits_mask), // @[HellaCache.scala:292:25] .io_mem_resp_bits_replay (_dcacheArb_io_requestor_0_resp_bits_replay), // @[HellaCache.scala:292:25] .io_mem_resp_bits_has_data (_dcacheArb_io_requestor_0_resp_bits_has_data), // @[HellaCache.scala:292:25] .io_mem_resp_bits_data_word_bypass (_dcacheArb_io_requestor_0_resp_bits_data_word_bypass), // @[HellaCache.scala:292:25] .io_mem_resp_bits_data_raw (_dcacheArb_io_requestor_0_resp_bits_data_raw), // @[HellaCache.scala:292:25] .io_mem_resp_bits_store_data (_dcacheArb_io_requestor_0_resp_bits_store_data), // @[HellaCache.scala:292:25] .io_mem_replay_next (_dcacheArb_io_requestor_0_replay_next), // @[HellaCache.scala:292:25] .io_mem_s2_xcpt_ma_ld (_dcacheArb_io_requestor_0_s2_xcpt_ma_ld), // @[HellaCache.scala:292:25] .io_mem_s2_xcpt_ma_st (_dcacheArb_io_requestor_0_s2_xcpt_ma_st), // @[HellaCache.scala:292:25] .io_mem_s2_xcpt_pf_ld (_dcacheArb_io_requestor_0_s2_xcpt_pf_ld), // @[HellaCache.scala:292:25] .io_mem_s2_xcpt_pf_st (_dcacheArb_io_requestor_0_s2_xcpt_pf_st), // @[HellaCache.scala:292:25] .io_mem_s2_xcpt_ae_ld (_dcacheArb_io_requestor_0_s2_xcpt_ae_ld), // @[HellaCache.scala:292:25] .io_mem_s2_xcpt_ae_st (_dcacheArb_io_requestor_0_s2_xcpt_ae_st), // @[HellaCache.scala:292:25] .io_mem_s2_gpa (_dcacheArb_io_requestor_0_s2_gpa), // @[HellaCache.scala:292:25] .io_mem_ordered (_dcacheArb_io_requestor_0_ordered), // @[HellaCache.scala:292:25] .io_mem_store_pending (_dcacheArb_io_requestor_0_store_pending), // @[HellaCache.scala:292:25] .io_mem_perf_acquire (_dcacheArb_io_requestor_0_perf_acquire), // @[HellaCache.scala:292:25] .io_mem_perf_release (_dcacheArb_io_requestor_0_perf_release), // @[HellaCache.scala:292:25] .io_mem_perf_grant (_dcacheArb_io_requestor_0_perf_grant), // @[HellaCache.scala:292:25] .io_mem_perf_tlbMiss (_dcacheArb_io_requestor_0_perf_tlbMiss), // @[HellaCache.scala:292:25] .io_mem_perf_blocked (_dcacheArb_io_requestor_0_perf_blocked), // @[HellaCache.scala:292:25] .io_mem_perf_canAcceptStoreThenLoad (_dcacheArb_io_requestor_0_perf_canAcceptStoreThenLoad), // @[HellaCache.scala:292:25] .io_mem_perf_canAcceptStoreThenRMW 
(_dcacheArb_io_requestor_0_perf_canAcceptStoreThenRMW), // @[HellaCache.scala:292:25] .io_mem_perf_canAcceptLoadThenLoad (_dcacheArb_io_requestor_0_perf_canAcceptLoadThenLoad), // @[HellaCache.scala:292:25] .io_mem_perf_storeBufferEmptyAfterLoad (_dcacheArb_io_requestor_0_perf_storeBufferEmptyAfterLoad), // @[HellaCache.scala:292:25] .io_mem_perf_storeBufferEmptyAfterStore (_dcacheArb_io_requestor_0_perf_storeBufferEmptyAfterStore), // @[HellaCache.scala:292:25] .io_dpath_ptbr_mode (_core_io_ptw_ptbr_mode), // @[RocketTile.scala:147:20] .io_dpath_ptbr_ppn (_core_io_ptw_ptbr_ppn), // @[RocketTile.scala:147:20] .io_dpath_sfence_valid (_core_io_ptw_sfence_valid), // @[RocketTile.scala:147:20] .io_dpath_sfence_bits_rs1 (_core_io_ptw_sfence_bits_rs1), // @[RocketTile.scala:147:20] .io_dpath_sfence_bits_rs2 (_core_io_ptw_sfence_bits_rs2), // @[RocketTile.scala:147:20] .io_dpath_sfence_bits_addr (_core_io_ptw_sfence_bits_addr), // @[RocketTile.scala:147:20] .io_dpath_sfence_bits_asid (_core_io_ptw_sfence_bits_asid), // @[RocketTile.scala:147:20] .io_dpath_sfence_bits_hv (_core_io_ptw_sfence_bits_hv), // @[RocketTile.scala:147:20] .io_dpath_sfence_bits_hg (_core_io_ptw_sfence_bits_hg), // @[RocketTile.scala:147:20] .io_dpath_status_debug (_core_io_ptw_status_debug), // @[RocketTile.scala:147:20] .io_dpath_status_cease (_core_io_ptw_status_cease), // @[RocketTile.scala:147:20] .io_dpath_status_wfi (_core_io_ptw_status_wfi), // @[RocketTile.scala:147:20] .io_dpath_status_isa (_core_io_ptw_status_isa), // @[RocketTile.scala:147:20] .io_dpath_status_dprv (_core_io_ptw_status_dprv), // @[RocketTile.scala:147:20] .io_dpath_status_dv (_core_io_ptw_status_dv), // @[RocketTile.scala:147:20] .io_dpath_status_prv (_core_io_ptw_status_prv), // @[RocketTile.scala:147:20] .io_dpath_status_v (_core_io_ptw_status_v), // @[RocketTile.scala:147:20] .io_dpath_status_sd (_core_io_ptw_status_sd), // @[RocketTile.scala:147:20] .io_dpath_status_mpv (_core_io_ptw_status_mpv), // @[RocketTile.scala:147:20] .io_dpath_status_gva (_core_io_ptw_status_gva), // @[RocketTile.scala:147:20] .io_dpath_status_tsr (_core_io_ptw_status_tsr), // @[RocketTile.scala:147:20] .io_dpath_status_tw (_core_io_ptw_status_tw), // @[RocketTile.scala:147:20] .io_dpath_status_tvm (_core_io_ptw_status_tvm), // @[RocketTile.scala:147:20] .io_dpath_status_mxr (_core_io_ptw_status_mxr), // @[RocketTile.scala:147:20] .io_dpath_status_sum (_core_io_ptw_status_sum), // @[RocketTile.scala:147:20] .io_dpath_status_mprv (_core_io_ptw_status_mprv), // @[RocketTile.scala:147:20] .io_dpath_status_fs (_core_io_ptw_status_fs), // @[RocketTile.scala:147:20] .io_dpath_status_mpp (_core_io_ptw_status_mpp), // @[RocketTile.scala:147:20] .io_dpath_status_spp (_core_io_ptw_status_spp), // @[RocketTile.scala:147:20] .io_dpath_status_mpie (_core_io_ptw_status_mpie), // @[RocketTile.scala:147:20] .io_dpath_status_spie (_core_io_ptw_status_spie), // @[RocketTile.scala:147:20] .io_dpath_status_mie (_core_io_ptw_status_mie), // @[RocketTile.scala:147:20] .io_dpath_status_sie (_core_io_ptw_status_sie), // @[RocketTile.scala:147:20] .io_dpath_hstatus_spvp (_core_io_ptw_hstatus_spvp), // @[RocketTile.scala:147:20] .io_dpath_hstatus_spv (_core_io_ptw_hstatus_spv), // @[RocketTile.scala:147:20] .io_dpath_hstatus_gva (_core_io_ptw_hstatus_gva), // @[RocketTile.scala:147:20] .io_dpath_gstatus_debug (_core_io_ptw_gstatus_debug), // @[RocketTile.scala:147:20] .io_dpath_gstatus_cease (_core_io_ptw_gstatus_cease), // @[RocketTile.scala:147:20] .io_dpath_gstatus_wfi 
(_core_io_ptw_gstatus_wfi), // @[RocketTile.scala:147:20] .io_dpath_gstatus_isa (_core_io_ptw_gstatus_isa), // @[RocketTile.scala:147:20] .io_dpath_gstatus_dprv (_core_io_ptw_gstatus_dprv), // @[RocketTile.scala:147:20] .io_dpath_gstatus_dv (_core_io_ptw_gstatus_dv), // @[RocketTile.scala:147:20] .io_dpath_gstatus_prv (_core_io_ptw_gstatus_prv), // @[RocketTile.scala:147:20] .io_dpath_gstatus_v (_core_io_ptw_gstatus_v), // @[RocketTile.scala:147:20] .io_dpath_gstatus_sd (_core_io_ptw_gstatus_sd), // @[RocketTile.scala:147:20] .io_dpath_gstatus_zero2 (_core_io_ptw_gstatus_zero2), // @[RocketTile.scala:147:20] .io_dpath_gstatus_mpv (_core_io_ptw_gstatus_mpv), // @[RocketTile.scala:147:20] .io_dpath_gstatus_gva (_core_io_ptw_gstatus_gva), // @[RocketTile.scala:147:20] .io_dpath_gstatus_mbe (_core_io_ptw_gstatus_mbe), // @[RocketTile.scala:147:20] .io_dpath_gstatus_sbe (_core_io_ptw_gstatus_sbe), // @[RocketTile.scala:147:20] .io_dpath_gstatus_sxl (_core_io_ptw_gstatus_sxl), // @[RocketTile.scala:147:20] .io_dpath_gstatus_zero1 (_core_io_ptw_gstatus_zero1), // @[RocketTile.scala:147:20] .io_dpath_gstatus_tsr (_core_io_ptw_gstatus_tsr), // @[RocketTile.scala:147:20] .io_dpath_gstatus_tw (_core_io_ptw_gstatus_tw), // @[RocketTile.scala:147:20] .io_dpath_gstatus_tvm (_core_io_ptw_gstatus_tvm), // @[RocketTile.scala:147:20] .io_dpath_gstatus_mxr (_core_io_ptw_gstatus_mxr), // @[RocketTile.scala:147:20] .io_dpath_gstatus_sum (_core_io_ptw_gstatus_sum), // @[RocketTile.scala:147:20] .io_dpath_gstatus_mprv (_core_io_ptw_gstatus_mprv), // @[RocketTile.scala:147:20] .io_dpath_gstatus_fs (_core_io_ptw_gstatus_fs), // @[RocketTile.scala:147:20] .io_dpath_gstatus_mpp (_core_io_ptw_gstatus_mpp), // @[RocketTile.scala:147:20] .io_dpath_gstatus_vs (_core_io_ptw_gstatus_vs), // @[RocketTile.scala:147:20] .io_dpath_gstatus_spp (_core_io_ptw_gstatus_spp), // @[RocketTile.scala:147:20] .io_dpath_gstatus_mpie (_core_io_ptw_gstatus_mpie), // @[RocketTile.scala:147:20] .io_dpath_gstatus_ube (_core_io_ptw_gstatus_ube), // @[RocketTile.scala:147:20] .io_dpath_gstatus_spie (_core_io_ptw_gstatus_spie), // @[RocketTile.scala:147:20] .io_dpath_gstatus_upie (_core_io_ptw_gstatus_upie), // @[RocketTile.scala:147:20] .io_dpath_gstatus_mie (_core_io_ptw_gstatus_mie), // @[RocketTile.scala:147:20] .io_dpath_gstatus_hie (_core_io_ptw_gstatus_hie), // @[RocketTile.scala:147:20] .io_dpath_gstatus_sie (_core_io_ptw_gstatus_sie), // @[RocketTile.scala:147:20] .io_dpath_gstatus_uie (_core_io_ptw_gstatus_uie), // @[RocketTile.scala:147:20] .io_dpath_pmp_0_cfg_l (_core_io_ptw_pmp_0_cfg_l), // @[RocketTile.scala:147:20] .io_dpath_pmp_0_cfg_a (_core_io_ptw_pmp_0_cfg_a), // @[RocketTile.scala:147:20] .io_dpath_pmp_0_cfg_x (_core_io_ptw_pmp_0_cfg_x), // @[RocketTile.scala:147:20] .io_dpath_pmp_0_cfg_w (_core_io_ptw_pmp_0_cfg_w), // @[RocketTile.scala:147:20] .io_dpath_pmp_0_cfg_r (_core_io_ptw_pmp_0_cfg_r), // @[RocketTile.scala:147:20] .io_dpath_pmp_0_addr (_core_io_ptw_pmp_0_addr), // @[RocketTile.scala:147:20] .io_dpath_pmp_0_mask (_core_io_ptw_pmp_0_mask), // @[RocketTile.scala:147:20] .io_dpath_pmp_1_cfg_l (_core_io_ptw_pmp_1_cfg_l), // @[RocketTile.scala:147:20] .io_dpath_pmp_1_cfg_a (_core_io_ptw_pmp_1_cfg_a), // @[RocketTile.scala:147:20] .io_dpath_pmp_1_cfg_x (_core_io_ptw_pmp_1_cfg_x), // @[RocketTile.scala:147:20] .io_dpath_pmp_1_cfg_w (_core_io_ptw_pmp_1_cfg_w), // @[RocketTile.scala:147:20] .io_dpath_pmp_1_cfg_r (_core_io_ptw_pmp_1_cfg_r), // @[RocketTile.scala:147:20] .io_dpath_pmp_1_addr (_core_io_ptw_pmp_1_addr), // 
@[RocketTile.scala:147:20] .io_dpath_pmp_1_mask (_core_io_ptw_pmp_1_mask), // @[RocketTile.scala:147:20] .io_dpath_pmp_2_cfg_l (_core_io_ptw_pmp_2_cfg_l), // @[RocketTile.scala:147:20] .io_dpath_pmp_2_cfg_a (_core_io_ptw_pmp_2_cfg_a), // @[RocketTile.scala:147:20] .io_dpath_pmp_2_cfg_x (_core_io_ptw_pmp_2_cfg_x), // @[RocketTile.scala:147:20] .io_dpath_pmp_2_cfg_w (_core_io_ptw_pmp_2_cfg_w), // @[RocketTile.scala:147:20] .io_dpath_pmp_2_cfg_r (_core_io_ptw_pmp_2_cfg_r), // @[RocketTile.scala:147:20] .io_dpath_pmp_2_addr (_core_io_ptw_pmp_2_addr), // @[RocketTile.scala:147:20] .io_dpath_pmp_2_mask (_core_io_ptw_pmp_2_mask), // @[RocketTile.scala:147:20] .io_dpath_pmp_3_cfg_l (_core_io_ptw_pmp_3_cfg_l), // @[RocketTile.scala:147:20] .io_dpath_pmp_3_cfg_a (_core_io_ptw_pmp_3_cfg_a), // @[RocketTile.scala:147:20] .io_dpath_pmp_3_cfg_x (_core_io_ptw_pmp_3_cfg_x), // @[RocketTile.scala:147:20] .io_dpath_pmp_3_cfg_w (_core_io_ptw_pmp_3_cfg_w), // @[RocketTile.scala:147:20] .io_dpath_pmp_3_cfg_r (_core_io_ptw_pmp_3_cfg_r), // @[RocketTile.scala:147:20] .io_dpath_pmp_3_addr (_core_io_ptw_pmp_3_addr), // @[RocketTile.scala:147:20] .io_dpath_pmp_3_mask (_core_io_ptw_pmp_3_mask), // @[RocketTile.scala:147:20] .io_dpath_pmp_4_cfg_l (_core_io_ptw_pmp_4_cfg_l), // @[RocketTile.scala:147:20] .io_dpath_pmp_4_cfg_a (_core_io_ptw_pmp_4_cfg_a), // @[RocketTile.scala:147:20] .io_dpath_pmp_4_cfg_x (_core_io_ptw_pmp_4_cfg_x), // @[RocketTile.scala:147:20] .io_dpath_pmp_4_cfg_w (_core_io_ptw_pmp_4_cfg_w), // @[RocketTile.scala:147:20] .io_dpath_pmp_4_cfg_r (_core_io_ptw_pmp_4_cfg_r), // @[RocketTile.scala:147:20] .io_dpath_pmp_4_addr (_core_io_ptw_pmp_4_addr), // @[RocketTile.scala:147:20] .io_dpath_pmp_4_mask (_core_io_ptw_pmp_4_mask), // @[RocketTile.scala:147:20] .io_dpath_pmp_5_cfg_l (_core_io_ptw_pmp_5_cfg_l), // @[RocketTile.scala:147:20] .io_dpath_pmp_5_cfg_a (_core_io_ptw_pmp_5_cfg_a), // @[RocketTile.scala:147:20] .io_dpath_pmp_5_cfg_x (_core_io_ptw_pmp_5_cfg_x), // @[RocketTile.scala:147:20] .io_dpath_pmp_5_cfg_w (_core_io_ptw_pmp_5_cfg_w), // @[RocketTile.scala:147:20] .io_dpath_pmp_5_cfg_r (_core_io_ptw_pmp_5_cfg_r), // @[RocketTile.scala:147:20] .io_dpath_pmp_5_addr (_core_io_ptw_pmp_5_addr), // @[RocketTile.scala:147:20] .io_dpath_pmp_5_mask (_core_io_ptw_pmp_5_mask), // @[RocketTile.scala:147:20] .io_dpath_pmp_6_cfg_l (_core_io_ptw_pmp_6_cfg_l), // @[RocketTile.scala:147:20] .io_dpath_pmp_6_cfg_a (_core_io_ptw_pmp_6_cfg_a), // @[RocketTile.scala:147:20] .io_dpath_pmp_6_cfg_x (_core_io_ptw_pmp_6_cfg_x), // @[RocketTile.scala:147:20] .io_dpath_pmp_6_cfg_w (_core_io_ptw_pmp_6_cfg_w), // @[RocketTile.scala:147:20] .io_dpath_pmp_6_cfg_r (_core_io_ptw_pmp_6_cfg_r), // @[RocketTile.scala:147:20] .io_dpath_pmp_6_addr (_core_io_ptw_pmp_6_addr), // @[RocketTile.scala:147:20] .io_dpath_pmp_6_mask (_core_io_ptw_pmp_6_mask), // @[RocketTile.scala:147:20] .io_dpath_pmp_7_cfg_l (_core_io_ptw_pmp_7_cfg_l), // @[RocketTile.scala:147:20] .io_dpath_pmp_7_cfg_a (_core_io_ptw_pmp_7_cfg_a), // @[RocketTile.scala:147:20] .io_dpath_pmp_7_cfg_x (_core_io_ptw_pmp_7_cfg_x), // @[RocketTile.scala:147:20] .io_dpath_pmp_7_cfg_w (_core_io_ptw_pmp_7_cfg_w), // @[RocketTile.scala:147:20] .io_dpath_pmp_7_cfg_r (_core_io_ptw_pmp_7_cfg_r), // @[RocketTile.scala:147:20] .io_dpath_pmp_7_addr (_core_io_ptw_pmp_7_addr), // @[RocketTile.scala:147:20] .io_dpath_pmp_7_mask (_core_io_ptw_pmp_7_mask), // @[RocketTile.scala:147:20] .io_dpath_perf_pte_miss (_ptw_io_dpath_perf_pte_miss), .io_dpath_perf_pte_hit (_ptw_io_dpath_perf_pte_hit), 
.io_dpath_customCSRs_csrs_0_ren (_core_io_ptw_customCSRs_csrs_0_ren), // @[RocketTile.scala:147:20] .io_dpath_customCSRs_csrs_0_wen (_core_io_ptw_customCSRs_csrs_0_wen), // @[RocketTile.scala:147:20] .io_dpath_customCSRs_csrs_0_wdata (_core_io_ptw_customCSRs_csrs_0_wdata), // @[RocketTile.scala:147:20] .io_dpath_customCSRs_csrs_0_value (_core_io_ptw_customCSRs_csrs_0_value), // @[RocketTile.scala:147:20] .io_dpath_customCSRs_csrs_1_ren (_core_io_ptw_customCSRs_csrs_1_ren), // @[RocketTile.scala:147:20] .io_dpath_customCSRs_csrs_1_wen (_core_io_ptw_customCSRs_csrs_1_wen), // @[RocketTile.scala:147:20] .io_dpath_customCSRs_csrs_1_wdata (_core_io_ptw_customCSRs_csrs_1_wdata), // @[RocketTile.scala:147:20] .io_dpath_customCSRs_csrs_1_value (_core_io_ptw_customCSRs_csrs_1_value), // @[RocketTile.scala:147:20] .io_dpath_customCSRs_csrs_2_ren (_core_io_ptw_customCSRs_csrs_2_ren), // @[RocketTile.scala:147:20] .io_dpath_customCSRs_csrs_2_wen (_core_io_ptw_customCSRs_csrs_2_wen), // @[RocketTile.scala:147:20] .io_dpath_customCSRs_csrs_2_wdata (_core_io_ptw_customCSRs_csrs_2_wdata), // @[RocketTile.scala:147:20] .io_dpath_customCSRs_csrs_2_value (_core_io_ptw_customCSRs_csrs_2_value), // @[RocketTile.scala:147:20] .io_dpath_customCSRs_csrs_3_ren (_core_io_ptw_customCSRs_csrs_3_ren), // @[RocketTile.scala:147:20] .io_dpath_customCSRs_csrs_3_wen (_core_io_ptw_customCSRs_csrs_3_wen), // @[RocketTile.scala:147:20] .io_dpath_customCSRs_csrs_3_wdata (_core_io_ptw_customCSRs_csrs_3_wdata), // @[RocketTile.scala:147:20] .io_dpath_customCSRs_csrs_3_value (_core_io_ptw_customCSRs_csrs_3_value), // @[RocketTile.scala:147:20] .io_dpath_clock_enabled (_ptw_io_dpath_clock_enabled) ); // @[PTW.scala:802:19] Rocket_4 core ( // @[RocketTile.scala:147:20] .clock (clock), .reset (reset), .io_hartid (hartIdSinkNodeIn), // @[MixedNode.scala:551:17] .io_interrupts_debug (intSinkNodeIn_0), // @[MixedNode.scala:551:17] .io_interrupts_mtip (intSinkNodeIn_2), // @[MixedNode.scala:551:17] .io_interrupts_msip (intSinkNodeIn_1), // @[MixedNode.scala:551:17] .io_interrupts_meip (intSinkNodeIn_3), // @[MixedNode.scala:551:17] .io_interrupts_seip (intSinkNodeIn_4), // @[MixedNode.scala:551:17] .io_imem_might_request (_core_io_imem_might_request), .io_imem_req_valid (_core_io_imem_req_valid), .io_imem_req_bits_pc (_core_io_imem_req_bits_pc), .io_imem_req_bits_speculative (_core_io_imem_req_bits_speculative), .io_imem_sfence_valid (_core_io_imem_sfence_valid), .io_imem_sfence_bits_rs1 (_core_io_imem_sfence_bits_rs1), .io_imem_sfence_bits_rs2 (_core_io_imem_sfence_bits_rs2), .io_imem_sfence_bits_addr (_core_io_imem_sfence_bits_addr), .io_imem_sfence_bits_asid (_core_io_imem_sfence_bits_asid), .io_imem_sfence_bits_hv (_core_io_imem_sfence_bits_hv), .io_imem_sfence_bits_hg (_core_io_imem_sfence_bits_hg), .io_imem_resp_ready (_core_io_imem_resp_ready), .io_imem_resp_valid (_frontend_io_cpu_resp_valid), // @[Frontend.scala:393:28] .io_imem_resp_bits_btb_cfiType (_frontend_io_cpu_resp_bits_btb_cfiType), // @[Frontend.scala:393:28] .io_imem_resp_bits_btb_taken (_frontend_io_cpu_resp_bits_btb_taken), // @[Frontend.scala:393:28] .io_imem_resp_bits_btb_mask (_frontend_io_cpu_resp_bits_btb_mask), // @[Frontend.scala:393:28] .io_imem_resp_bits_btb_bridx (_frontend_io_cpu_resp_bits_btb_bridx), // @[Frontend.scala:393:28] .io_imem_resp_bits_btb_target (_frontend_io_cpu_resp_bits_btb_target), // @[Frontend.scala:393:28] .io_imem_resp_bits_btb_entry (_frontend_io_cpu_resp_bits_btb_entry), // @[Frontend.scala:393:28] 
.io_imem_resp_bits_btb_bht_history (_frontend_io_cpu_resp_bits_btb_bht_history), // @[Frontend.scala:393:28] .io_imem_resp_bits_btb_bht_value (_frontend_io_cpu_resp_bits_btb_bht_value), // @[Frontend.scala:393:28] .io_imem_resp_bits_pc (_frontend_io_cpu_resp_bits_pc), // @[Frontend.scala:393:28] .io_imem_resp_bits_data (_frontend_io_cpu_resp_bits_data), // @[Frontend.scala:393:28] .io_imem_resp_bits_mask (_frontend_io_cpu_resp_bits_mask), // @[Frontend.scala:393:28] .io_imem_resp_bits_xcpt_pf_inst (_frontend_io_cpu_resp_bits_xcpt_pf_inst), // @[Frontend.scala:393:28] .io_imem_resp_bits_xcpt_gf_inst (_frontend_io_cpu_resp_bits_xcpt_gf_inst), // @[Frontend.scala:393:28] .io_imem_resp_bits_xcpt_ae_inst (_frontend_io_cpu_resp_bits_xcpt_ae_inst), // @[Frontend.scala:393:28] .io_imem_resp_bits_replay (_frontend_io_cpu_resp_bits_replay), // @[Frontend.scala:393:28] .io_imem_gpa_valid (_frontend_io_cpu_gpa_valid), // @[Frontend.scala:393:28] .io_imem_gpa_bits (_frontend_io_cpu_gpa_bits), // @[Frontend.scala:393:28] .io_imem_gpa_is_pte (_frontend_io_cpu_gpa_is_pte), // @[Frontend.scala:393:28] .io_imem_btb_update_valid (_core_io_imem_btb_update_valid), .io_imem_btb_update_bits_prediction_cfiType (_core_io_imem_btb_update_bits_prediction_cfiType), .io_imem_btb_update_bits_prediction_taken (_core_io_imem_btb_update_bits_prediction_taken), .io_imem_btb_update_bits_prediction_mask (_core_io_imem_btb_update_bits_prediction_mask), .io_imem_btb_update_bits_prediction_bridx (_core_io_imem_btb_update_bits_prediction_bridx), .io_imem_btb_update_bits_prediction_target (_core_io_imem_btb_update_bits_prediction_target), .io_imem_btb_update_bits_prediction_entry (_core_io_imem_btb_update_bits_prediction_entry), .io_imem_btb_update_bits_prediction_bht_history (_core_io_imem_btb_update_bits_prediction_bht_history), .io_imem_btb_update_bits_prediction_bht_value (_core_io_imem_btb_update_bits_prediction_bht_value), .io_imem_btb_update_bits_pc (_core_io_imem_btb_update_bits_pc), .io_imem_btb_update_bits_target (_core_io_imem_btb_update_bits_target), .io_imem_btb_update_bits_isValid (_core_io_imem_btb_update_bits_isValid), .io_imem_btb_update_bits_br_pc (_core_io_imem_btb_update_bits_br_pc), .io_imem_btb_update_bits_cfiType (_core_io_imem_btb_update_bits_cfiType), .io_imem_bht_update_valid (_core_io_imem_bht_update_valid), .io_imem_bht_update_bits_prediction_history (_core_io_imem_bht_update_bits_prediction_history), .io_imem_bht_update_bits_prediction_value (_core_io_imem_bht_update_bits_prediction_value), .io_imem_bht_update_bits_pc (_core_io_imem_bht_update_bits_pc), .io_imem_bht_update_bits_branch (_core_io_imem_bht_update_bits_branch), .io_imem_bht_update_bits_taken (_core_io_imem_bht_update_bits_taken), .io_imem_bht_update_bits_mispredict (_core_io_imem_bht_update_bits_mispredict), .io_imem_flush_icache (_core_io_imem_flush_icache), .io_imem_npc (_frontend_io_cpu_npc), // @[Frontend.scala:393:28] .io_imem_perf_acquire (_frontend_io_cpu_perf_acquire), // @[Frontend.scala:393:28] .io_imem_perf_tlbMiss (_frontend_io_cpu_perf_tlbMiss), // @[Frontend.scala:393:28] .io_imem_progress (_core_io_imem_progress), .io_dmem_req_ready (_dcacheArb_io_requestor_1_req_ready), // @[HellaCache.scala:292:25] .io_dmem_req_valid (_core_io_dmem_req_valid), .io_dmem_req_bits_addr (_core_io_dmem_req_bits_addr), .io_dmem_req_bits_tag (_core_io_dmem_req_bits_tag), .io_dmem_req_bits_cmd (_core_io_dmem_req_bits_cmd), .io_dmem_req_bits_size (_core_io_dmem_req_bits_size), .io_dmem_req_bits_signed (_core_io_dmem_req_bits_signed), 
.io_dmem_req_bits_dprv (_core_io_dmem_req_bits_dprv), .io_dmem_req_bits_dv (_core_io_dmem_req_bits_dv), .io_dmem_req_bits_no_resp (_core_io_dmem_req_bits_no_resp), .io_dmem_s1_kill (_core_io_dmem_s1_kill), .io_dmem_s1_data_data (_core_io_dmem_s1_data_data), .io_dmem_s2_nack (_dcacheArb_io_requestor_1_s2_nack), // @[HellaCache.scala:292:25] .io_dmem_s2_nack_cause_raw (_dcacheArb_io_requestor_1_s2_nack_cause_raw), // @[HellaCache.scala:292:25] .io_dmem_s2_uncached (_dcacheArb_io_requestor_1_s2_uncached), // @[HellaCache.scala:292:25] .io_dmem_s2_paddr (_dcacheArb_io_requestor_1_s2_paddr), // @[HellaCache.scala:292:25] .io_dmem_resp_valid (_dcacheArb_io_requestor_1_resp_valid), // @[HellaCache.scala:292:25] .io_dmem_resp_bits_addr (_dcacheArb_io_requestor_1_resp_bits_addr), // @[HellaCache.scala:292:25] .io_dmem_resp_bits_tag (_dcacheArb_io_requestor_1_resp_bits_tag), // @[HellaCache.scala:292:25] .io_dmem_resp_bits_cmd (_dcacheArb_io_requestor_1_resp_bits_cmd), // @[HellaCache.scala:292:25] .io_dmem_resp_bits_size (_dcacheArb_io_requestor_1_resp_bits_size), // @[HellaCache.scala:292:25] .io_dmem_resp_bits_signed (_dcacheArb_io_requestor_1_resp_bits_signed), // @[HellaCache.scala:292:25] .io_dmem_resp_bits_dprv (_dcacheArb_io_requestor_1_resp_bits_dprv), // @[HellaCache.scala:292:25] .io_dmem_resp_bits_dv (_dcacheArb_io_requestor_1_resp_bits_dv), // @[HellaCache.scala:292:25] .io_dmem_resp_bits_data (_dcacheArb_io_requestor_1_resp_bits_data), // @[HellaCache.scala:292:25] .io_dmem_resp_bits_mask (_dcacheArb_io_requestor_1_resp_bits_mask), // @[HellaCache.scala:292:25] .io_dmem_resp_bits_replay (_dcacheArb_io_requestor_1_resp_bits_replay), // @[HellaCache.scala:292:25] .io_dmem_resp_bits_has_data (_dcacheArb_io_requestor_1_resp_bits_has_data), // @[HellaCache.scala:292:25] .io_dmem_resp_bits_data_word_bypass (_dcacheArb_io_requestor_1_resp_bits_data_word_bypass), // @[HellaCache.scala:292:25] .io_dmem_resp_bits_data_raw (_dcacheArb_io_requestor_1_resp_bits_data_raw), // @[HellaCache.scala:292:25] .io_dmem_resp_bits_store_data (_dcacheArb_io_requestor_1_resp_bits_store_data), // @[HellaCache.scala:292:25] .io_dmem_replay_next (_dcacheArb_io_requestor_1_replay_next), // @[HellaCache.scala:292:25] .io_dmem_s2_xcpt_ma_ld (_dcacheArb_io_requestor_1_s2_xcpt_ma_ld), // @[HellaCache.scala:292:25] .io_dmem_s2_xcpt_ma_st (_dcacheArb_io_requestor_1_s2_xcpt_ma_st), // @[HellaCache.scala:292:25] .io_dmem_s2_xcpt_pf_ld (_dcacheArb_io_requestor_1_s2_xcpt_pf_ld), // @[HellaCache.scala:292:25] .io_dmem_s2_xcpt_pf_st (_dcacheArb_io_requestor_1_s2_xcpt_pf_st), // @[HellaCache.scala:292:25] .io_dmem_s2_xcpt_ae_ld (_dcacheArb_io_requestor_1_s2_xcpt_ae_ld), // @[HellaCache.scala:292:25] .io_dmem_s2_xcpt_ae_st (_dcacheArb_io_requestor_1_s2_xcpt_ae_st), // @[HellaCache.scala:292:25] .io_dmem_s2_gpa (_dcacheArb_io_requestor_1_s2_gpa), // @[HellaCache.scala:292:25] .io_dmem_ordered (_dcacheArb_io_requestor_1_ordered), // @[HellaCache.scala:292:25] .io_dmem_store_pending (_dcacheArb_io_requestor_1_store_pending), // @[HellaCache.scala:292:25] .io_dmem_perf_acquire (_dcacheArb_io_requestor_1_perf_acquire), // @[HellaCache.scala:292:25] .io_dmem_perf_release (_dcacheArb_io_requestor_1_perf_release), // @[HellaCache.scala:292:25] .io_dmem_perf_grant (_dcacheArb_io_requestor_1_perf_grant), // @[HellaCache.scala:292:25] .io_dmem_perf_tlbMiss (_dcacheArb_io_requestor_1_perf_tlbMiss), // @[HellaCache.scala:292:25] .io_dmem_perf_blocked (_dcacheArb_io_requestor_1_perf_blocked), // @[HellaCache.scala:292:25] 
.io_dmem_perf_canAcceptStoreThenLoad (_dcacheArb_io_requestor_1_perf_canAcceptStoreThenLoad), // @[HellaCache.scala:292:25] .io_dmem_perf_canAcceptStoreThenRMW (_dcacheArb_io_requestor_1_perf_canAcceptStoreThenRMW), // @[HellaCache.scala:292:25] .io_dmem_perf_canAcceptLoadThenLoad (_dcacheArb_io_requestor_1_perf_canAcceptLoadThenLoad), // @[HellaCache.scala:292:25] .io_dmem_perf_storeBufferEmptyAfterLoad (_dcacheArb_io_requestor_1_perf_storeBufferEmptyAfterLoad), // @[HellaCache.scala:292:25] .io_dmem_perf_storeBufferEmptyAfterStore (_dcacheArb_io_requestor_1_perf_storeBufferEmptyAfterStore), // @[HellaCache.scala:292:25] .io_dmem_keep_clock_enabled (_core_io_dmem_keep_clock_enabled), .io_ptw_ptbr_mode (_core_io_ptw_ptbr_mode), .io_ptw_ptbr_ppn (_core_io_ptw_ptbr_ppn), .io_ptw_sfence_valid (_core_io_ptw_sfence_valid), .io_ptw_sfence_bits_rs1 (_core_io_ptw_sfence_bits_rs1), .io_ptw_sfence_bits_rs2 (_core_io_ptw_sfence_bits_rs2), .io_ptw_sfence_bits_addr (_core_io_ptw_sfence_bits_addr), .io_ptw_sfence_bits_asid (_core_io_ptw_sfence_bits_asid), .io_ptw_sfence_bits_hv (_core_io_ptw_sfence_bits_hv), .io_ptw_sfence_bits_hg (_core_io_ptw_sfence_bits_hg), .io_ptw_status_debug (_core_io_ptw_status_debug), .io_ptw_status_cease (_core_io_ptw_status_cease), .io_ptw_status_wfi (_core_io_ptw_status_wfi), .io_ptw_status_isa (_core_io_ptw_status_isa), .io_ptw_status_dprv (_core_io_ptw_status_dprv), .io_ptw_status_dv (_core_io_ptw_status_dv), .io_ptw_status_prv (_core_io_ptw_status_prv), .io_ptw_status_v (_core_io_ptw_status_v), .io_ptw_status_sd (_core_io_ptw_status_sd), .io_ptw_status_mpv (_core_io_ptw_status_mpv), .io_ptw_status_gva (_core_io_ptw_status_gva), .io_ptw_status_tsr (_core_io_ptw_status_tsr), .io_ptw_status_tw (_core_io_ptw_status_tw), .io_ptw_status_tvm (_core_io_ptw_status_tvm), .io_ptw_status_mxr (_core_io_ptw_status_mxr), .io_ptw_status_sum (_core_io_ptw_status_sum), .io_ptw_status_mprv (_core_io_ptw_status_mprv), .io_ptw_status_fs (_core_io_ptw_status_fs), .io_ptw_status_mpp (_core_io_ptw_status_mpp), .io_ptw_status_spp (_core_io_ptw_status_spp), .io_ptw_status_mpie (_core_io_ptw_status_mpie), .io_ptw_status_spie (_core_io_ptw_status_spie), .io_ptw_status_mie (_core_io_ptw_status_mie), .io_ptw_status_sie (_core_io_ptw_status_sie), .io_ptw_hstatus_spvp (_core_io_ptw_hstatus_spvp), .io_ptw_hstatus_spv (_core_io_ptw_hstatus_spv), .io_ptw_hstatus_gva (_core_io_ptw_hstatus_gva), .io_ptw_gstatus_debug (_core_io_ptw_gstatus_debug), .io_ptw_gstatus_cease (_core_io_ptw_gstatus_cease), .io_ptw_gstatus_wfi (_core_io_ptw_gstatus_wfi), .io_ptw_gstatus_isa (_core_io_ptw_gstatus_isa), .io_ptw_gstatus_dprv (_core_io_ptw_gstatus_dprv), .io_ptw_gstatus_dv (_core_io_ptw_gstatus_dv), .io_ptw_gstatus_prv (_core_io_ptw_gstatus_prv), .io_ptw_gstatus_v (_core_io_ptw_gstatus_v), .io_ptw_gstatus_sd (_core_io_ptw_gstatus_sd), .io_ptw_gstatus_zero2 (_core_io_ptw_gstatus_zero2), .io_ptw_gstatus_mpv (_core_io_ptw_gstatus_mpv), .io_ptw_gstatus_gva (_core_io_ptw_gstatus_gva), .io_ptw_gstatus_mbe (_core_io_ptw_gstatus_mbe), .io_ptw_gstatus_sbe (_core_io_ptw_gstatus_sbe), .io_ptw_gstatus_sxl (_core_io_ptw_gstatus_sxl), .io_ptw_gstatus_zero1 (_core_io_ptw_gstatus_zero1), .io_ptw_gstatus_tsr (_core_io_ptw_gstatus_tsr), .io_ptw_gstatus_tw (_core_io_ptw_gstatus_tw), .io_ptw_gstatus_tvm (_core_io_ptw_gstatus_tvm), .io_ptw_gstatus_mxr (_core_io_ptw_gstatus_mxr), .io_ptw_gstatus_sum (_core_io_ptw_gstatus_sum), .io_ptw_gstatus_mprv (_core_io_ptw_gstatus_mprv), .io_ptw_gstatus_fs (_core_io_ptw_gstatus_fs), .io_ptw_gstatus_mpp 
(_core_io_ptw_gstatus_mpp), .io_ptw_gstatus_vs (_core_io_ptw_gstatus_vs), .io_ptw_gstatus_spp (_core_io_ptw_gstatus_spp), .io_ptw_gstatus_mpie (_core_io_ptw_gstatus_mpie), .io_ptw_gstatus_ube (_core_io_ptw_gstatus_ube), .io_ptw_gstatus_spie (_core_io_ptw_gstatus_spie), .io_ptw_gstatus_upie (_core_io_ptw_gstatus_upie), .io_ptw_gstatus_mie (_core_io_ptw_gstatus_mie), .io_ptw_gstatus_hie (_core_io_ptw_gstatus_hie), .io_ptw_gstatus_sie (_core_io_ptw_gstatus_sie), .io_ptw_gstatus_uie (_core_io_ptw_gstatus_uie), .io_ptw_pmp_0_cfg_l (_core_io_ptw_pmp_0_cfg_l), .io_ptw_pmp_0_cfg_a (_core_io_ptw_pmp_0_cfg_a), .io_ptw_pmp_0_cfg_x (_core_io_ptw_pmp_0_cfg_x), .io_ptw_pmp_0_cfg_w (_core_io_ptw_pmp_0_cfg_w), .io_ptw_pmp_0_cfg_r (_core_io_ptw_pmp_0_cfg_r), .io_ptw_pmp_0_addr (_core_io_ptw_pmp_0_addr), .io_ptw_pmp_0_mask (_core_io_ptw_pmp_0_mask), .io_ptw_pmp_1_cfg_l (_core_io_ptw_pmp_1_cfg_l), .io_ptw_pmp_1_cfg_a (_core_io_ptw_pmp_1_cfg_a), .io_ptw_pmp_1_cfg_x (_core_io_ptw_pmp_1_cfg_x), .io_ptw_pmp_1_cfg_w (_core_io_ptw_pmp_1_cfg_w), .io_ptw_pmp_1_cfg_r (_core_io_ptw_pmp_1_cfg_r), .io_ptw_pmp_1_addr (_core_io_ptw_pmp_1_addr), .io_ptw_pmp_1_mask (_core_io_ptw_pmp_1_mask), .io_ptw_pmp_2_cfg_l (_core_io_ptw_pmp_2_cfg_l), .io_ptw_pmp_2_cfg_a (_core_io_ptw_pmp_2_cfg_a), .io_ptw_pmp_2_cfg_x (_core_io_ptw_pmp_2_cfg_x), .io_ptw_pmp_2_cfg_w (_core_io_ptw_pmp_2_cfg_w), .io_ptw_pmp_2_cfg_r (_core_io_ptw_pmp_2_cfg_r), .io_ptw_pmp_2_addr (_core_io_ptw_pmp_2_addr), .io_ptw_pmp_2_mask (_core_io_ptw_pmp_2_mask), .io_ptw_pmp_3_cfg_l (_core_io_ptw_pmp_3_cfg_l), .io_ptw_pmp_3_cfg_a (_core_io_ptw_pmp_3_cfg_a), .io_ptw_pmp_3_cfg_x (_core_io_ptw_pmp_3_cfg_x), .io_ptw_pmp_3_cfg_w (_core_io_ptw_pmp_3_cfg_w), .io_ptw_pmp_3_cfg_r (_core_io_ptw_pmp_3_cfg_r), .io_ptw_pmp_3_addr (_core_io_ptw_pmp_3_addr), .io_ptw_pmp_3_mask (_core_io_ptw_pmp_3_mask), .io_ptw_pmp_4_cfg_l (_core_io_ptw_pmp_4_cfg_l), .io_ptw_pmp_4_cfg_a (_core_io_ptw_pmp_4_cfg_a), .io_ptw_pmp_4_cfg_x (_core_io_ptw_pmp_4_cfg_x), .io_ptw_pmp_4_cfg_w (_core_io_ptw_pmp_4_cfg_w), .io_ptw_pmp_4_cfg_r (_core_io_ptw_pmp_4_cfg_r), .io_ptw_pmp_4_addr (_core_io_ptw_pmp_4_addr), .io_ptw_pmp_4_mask (_core_io_ptw_pmp_4_mask), .io_ptw_pmp_5_cfg_l (_core_io_ptw_pmp_5_cfg_l), .io_ptw_pmp_5_cfg_a (_core_io_ptw_pmp_5_cfg_a), .io_ptw_pmp_5_cfg_x (_core_io_ptw_pmp_5_cfg_x), .io_ptw_pmp_5_cfg_w (_core_io_ptw_pmp_5_cfg_w), .io_ptw_pmp_5_cfg_r (_core_io_ptw_pmp_5_cfg_r), .io_ptw_pmp_5_addr (_core_io_ptw_pmp_5_addr), .io_ptw_pmp_5_mask (_core_io_ptw_pmp_5_mask), .io_ptw_pmp_6_cfg_l (_core_io_ptw_pmp_6_cfg_l), .io_ptw_pmp_6_cfg_a (_core_io_ptw_pmp_6_cfg_a), .io_ptw_pmp_6_cfg_x (_core_io_ptw_pmp_6_cfg_x), .io_ptw_pmp_6_cfg_w (_core_io_ptw_pmp_6_cfg_w), .io_ptw_pmp_6_cfg_r (_core_io_ptw_pmp_6_cfg_r), .io_ptw_pmp_6_addr (_core_io_ptw_pmp_6_addr), .io_ptw_pmp_6_mask (_core_io_ptw_pmp_6_mask), .io_ptw_pmp_7_cfg_l (_core_io_ptw_pmp_7_cfg_l), .io_ptw_pmp_7_cfg_a (_core_io_ptw_pmp_7_cfg_a), .io_ptw_pmp_7_cfg_x (_core_io_ptw_pmp_7_cfg_x), .io_ptw_pmp_7_cfg_w (_core_io_ptw_pmp_7_cfg_w), .io_ptw_pmp_7_cfg_r (_core_io_ptw_pmp_7_cfg_r), .io_ptw_pmp_7_addr (_core_io_ptw_pmp_7_addr), .io_ptw_pmp_7_mask (_core_io_ptw_pmp_7_mask), .io_ptw_perf_pte_miss (_ptw_io_dpath_perf_pte_miss), // @[PTW.scala:802:19] .io_ptw_perf_pte_hit (_ptw_io_dpath_perf_pte_hit), // @[PTW.scala:802:19] .io_ptw_customCSRs_csrs_0_ren (_core_io_ptw_customCSRs_csrs_0_ren), .io_ptw_customCSRs_csrs_0_wen (_core_io_ptw_customCSRs_csrs_0_wen), .io_ptw_customCSRs_csrs_0_wdata (_core_io_ptw_customCSRs_csrs_0_wdata), 
.io_ptw_customCSRs_csrs_0_value (_core_io_ptw_customCSRs_csrs_0_value), .io_ptw_customCSRs_csrs_1_ren (_core_io_ptw_customCSRs_csrs_1_ren), .io_ptw_customCSRs_csrs_1_wen (_core_io_ptw_customCSRs_csrs_1_wen), .io_ptw_customCSRs_csrs_1_wdata (_core_io_ptw_customCSRs_csrs_1_wdata), .io_ptw_customCSRs_csrs_1_value (_core_io_ptw_customCSRs_csrs_1_value), .io_ptw_customCSRs_csrs_2_ren (_core_io_ptw_customCSRs_csrs_2_ren), .io_ptw_customCSRs_csrs_2_wen (_core_io_ptw_customCSRs_csrs_2_wen), .io_ptw_customCSRs_csrs_2_wdata (_core_io_ptw_customCSRs_csrs_2_wdata), .io_ptw_customCSRs_csrs_2_value (_core_io_ptw_customCSRs_csrs_2_value), .io_ptw_customCSRs_csrs_3_ren (_core_io_ptw_customCSRs_csrs_3_ren), .io_ptw_customCSRs_csrs_3_wen (_core_io_ptw_customCSRs_csrs_3_wen), .io_ptw_customCSRs_csrs_3_wdata (_core_io_ptw_customCSRs_csrs_3_wdata), .io_ptw_customCSRs_csrs_3_value (_core_io_ptw_customCSRs_csrs_3_value), .io_ptw_clock_enabled (_ptw_io_dpath_clock_enabled), // @[PTW.scala:802:19] .io_fpu_hartid (_core_io_fpu_hartid), .io_fpu_time (_core_io_fpu_time), .io_fpu_inst (_core_io_fpu_inst), .io_fpu_fromint_data (_core_io_fpu_fromint_data), .io_fpu_fcsr_rm (_core_io_fpu_fcsr_rm), .io_fpu_fcsr_flags_valid (_fpuOpt_io_fcsr_flags_valid), // @[RocketTile.scala:242:62] .io_fpu_fcsr_flags_bits (_fpuOpt_io_fcsr_flags_bits), // @[RocketTile.scala:242:62] .io_fpu_store_data (_fpuOpt_io_store_data), // @[RocketTile.scala:242:62] .io_fpu_toint_data (_fpuOpt_io_toint_data), // @[RocketTile.scala:242:62] .io_fpu_ll_resp_val (_core_io_fpu_ll_resp_val), .io_fpu_ll_resp_type (_core_io_fpu_ll_resp_type), .io_fpu_ll_resp_tag (_core_io_fpu_ll_resp_tag), .io_fpu_ll_resp_data (_core_io_fpu_ll_resp_data), .io_fpu_valid (_core_io_fpu_valid), .io_fpu_fcsr_rdy (_fpuOpt_io_fcsr_rdy), // @[RocketTile.scala:242:62] .io_fpu_nack_mem (_fpuOpt_io_nack_mem), // @[RocketTile.scala:242:62] .io_fpu_illegal_rm (_fpuOpt_io_illegal_rm), // @[RocketTile.scala:242:62] .io_fpu_killx (_core_io_fpu_killx), .io_fpu_killm (_core_io_fpu_killm), .io_fpu_dec_ldst (_fpuOpt_io_dec_ldst), // @[RocketTile.scala:242:62] .io_fpu_dec_wen (_fpuOpt_io_dec_wen), // @[RocketTile.scala:242:62] .io_fpu_dec_ren1 (_fpuOpt_io_dec_ren1), // @[RocketTile.scala:242:62] .io_fpu_dec_ren2 (_fpuOpt_io_dec_ren2), // @[RocketTile.scala:242:62] .io_fpu_dec_ren3 (_fpuOpt_io_dec_ren3), // @[RocketTile.scala:242:62] .io_fpu_dec_swap12 (_fpuOpt_io_dec_swap12), // @[RocketTile.scala:242:62] .io_fpu_dec_swap23 (_fpuOpt_io_dec_swap23), // @[RocketTile.scala:242:62] .io_fpu_dec_typeTagIn (_fpuOpt_io_dec_typeTagIn), // @[RocketTile.scala:242:62] .io_fpu_dec_typeTagOut (_fpuOpt_io_dec_typeTagOut), // @[RocketTile.scala:242:62] .io_fpu_dec_fromint (_fpuOpt_io_dec_fromint), // @[RocketTile.scala:242:62] .io_fpu_dec_toint (_fpuOpt_io_dec_toint), // @[RocketTile.scala:242:62] .io_fpu_dec_fastpipe (_fpuOpt_io_dec_fastpipe), // @[RocketTile.scala:242:62] .io_fpu_dec_fma (_fpuOpt_io_dec_fma), // @[RocketTile.scala:242:62] .io_fpu_dec_div (_fpuOpt_io_dec_div), // @[RocketTile.scala:242:62] .io_fpu_dec_sqrt (_fpuOpt_io_dec_sqrt), // @[RocketTile.scala:242:62] .io_fpu_dec_wflags (_fpuOpt_io_dec_wflags), // @[RocketTile.scala:242:62] .io_fpu_dec_vec (_fpuOpt_io_dec_vec), // @[RocketTile.scala:242:62] .io_fpu_sboard_set (_fpuOpt_io_sboard_set), // @[RocketTile.scala:242:62] .io_fpu_sboard_clr (_fpuOpt_io_sboard_clr), // @[RocketTile.scala:242:62] .io_fpu_sboard_clra (_fpuOpt_io_sboard_clra), // @[RocketTile.scala:242:62] .io_fpu_keep_clock_enabled (_core_io_fpu_keep_clock_enabled), 
.io_trace_insns_0_valid (traceSourceNodeOut_insns_0_valid), .io_trace_insns_0_iaddr (traceSourceNodeOut_insns_0_iaddr), .io_trace_insns_0_insn (traceSourceNodeOut_insns_0_insn), .io_trace_insns_0_priv (traceSourceNodeOut_insns_0_priv), .io_trace_insns_0_exception (traceSourceNodeOut_insns_0_exception), .io_trace_insns_0_interrupt (traceSourceNodeOut_insns_0_interrupt), .io_trace_insns_0_cause (traceSourceNodeOut_insns_0_cause), .io_trace_insns_0_tval (traceSourceNodeOut_insns_0_tval), .io_trace_time (traceSourceNodeOut_time), .io_bpwatch_0_valid_0 (bpwatchSourceNodeOut_0_valid_0), .io_bpwatch_0_action (bpwatchSourceNodeOut_0_action), .io_wfi (_core_io_wfi) ); // @[RocketTile.scala:147:20] assign auto_buffer_out_a_valid = auto_buffer_out_a_valid_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_a_bits_opcode = auto_buffer_out_a_bits_opcode_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_a_bits_param = auto_buffer_out_a_bits_param_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_a_bits_size = auto_buffer_out_a_bits_size_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_a_bits_source = auto_buffer_out_a_bits_source_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_a_bits_address = auto_buffer_out_a_bits_address_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_a_bits_mask = auto_buffer_out_a_bits_mask_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_a_bits_data = auto_buffer_out_a_bits_data_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_b_ready = auto_buffer_out_b_ready_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_c_valid = auto_buffer_out_c_valid_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_c_bits_opcode = auto_buffer_out_c_bits_opcode_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_c_bits_param = auto_buffer_out_c_bits_param_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_c_bits_size = auto_buffer_out_c_bits_size_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_c_bits_source = auto_buffer_out_c_bits_source_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_c_bits_address = auto_buffer_out_c_bits_address_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_c_bits_data = auto_buffer_out_c_bits_data_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_d_ready = auto_buffer_out_d_ready_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_e_valid = auto_buffer_out_e_valid_0; // @[RocketTile.scala:141:7] assign auto_buffer_out_e_bits_sink = auto_buffer_out_e_bits_sink_0; // @[RocketTile.scala:141:7] assign auto_wfi_out_0 = auto_wfi_out_0_0; // @[RocketTile.scala:141:7] assign auto_trace_source_out_insns_0_valid = auto_trace_source_out_insns_0_valid_0; // @[RocketTile.scala:141:7] assign auto_trace_source_out_insns_0_iaddr = auto_trace_source_out_insns_0_iaddr_0; // @[RocketTile.scala:141:7] assign auto_trace_source_out_insns_0_insn = auto_trace_source_out_insns_0_insn_0; // @[RocketTile.scala:141:7] assign auto_trace_source_out_insns_0_priv = auto_trace_source_out_insns_0_priv_0; // @[RocketTile.scala:141:7] assign auto_trace_source_out_insns_0_exception = auto_trace_source_out_insns_0_exception_0; // @[RocketTile.scala:141:7] assign auto_trace_source_out_insns_0_interrupt = auto_trace_source_out_insns_0_interrupt_0; // @[RocketTile.scala:141:7] assign auto_trace_source_out_insns_0_cause = auto_trace_source_out_insns_0_cause_0; // @[RocketTile.scala:141:7] assign auto_trace_source_out_insns_0_tval = auto_trace_source_out_insns_0_tval_0; // @[RocketTile.scala:141:7] assign auto_trace_source_out_time = 
auto_trace_source_out_time_0; // @[RocketTile.scala:141:7] endmodule
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File AsyncQueue.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ case class AsyncQueueParams( depth: Int = 8, sync: Int = 3, safe: Boolean = true, // If safe is true, then effort is made to resynchronize the crossing indices when either side is reset. // This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty. narrow: Boolean = false) // If narrow is true then the read mux is moved to the source side of the crossing. // This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing, // at the expense of a combinational path from the sink to the source and back to the sink. { require (depth > 0 && isPow2(depth)) require (sync >= 2) val bits = log2Ceil(depth) val wires = if (narrow) 1 else depth } object AsyncQueueParams { // When there is only one entry, we don't need narrow. 
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false) } class AsyncBundleSafety extends Bundle { val ridx_valid = Input (Bool()) val widx_valid = Output(Bool()) val source_reset_n = Output(Bool()) val sink_reset_n = Input (Bool()) } class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle { // Data-path synchronization val mem = Output(Vec(params.wires, gen)) val ridx = Input (UInt((params.bits+1).W)) val widx = Output(UInt((params.bits+1).W)) val index = params.narrow.option(Input(UInt(params.bits.W))) // Signals used to self-stabilize a safe AsyncQueue val safe = params.safe.option(new AsyncBundleSafety) } object GrayCounter { def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = { val incremented = Wire(UInt(bits.W)) val binary = RegNext(next=incremented, init=0.U).suggestName(name) incremented := Mux(clear, 0.U, binary + increment.asUInt) incremented ^ (incremented >> 1) } } class AsyncValidSync(sync: Int, desc: String) extends RawModule { val io = IO(new Bundle { val in = Input(Bool()) val out = Output(Bool()) }) val clock = IO(Input(Clock())) val reset = IO(Input(AsyncReset())) withClockAndReset(clock, reset){ io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc)) } } class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSource_${gen.typeName}" val io = IO(new Bundle { // These come from the source domain val enq = Flipped(Decoupled(gen)) // These cross to the sink clock domain val async = new AsyncBundle(gen, params) }) val bits = params.bits val sink_ready = WireInit(true.B) val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all. 
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin")) val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray")) val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U) val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1)) when (io.enq.fire) { mem(index) := io.enq.bits } val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg")) io.enq.ready := ready_reg && sink_ready val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray")) io.async.widx := widx_reg io.async.index match { case Some(index) => io.async.mem(0) := mem(index) case None => io.async.mem := mem } io.async.safe.foreach { sio => val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0")) val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1")) val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend")) val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid")) source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_valid .reset := reset.asAsyncReset source_valid_0.clock := clock source_valid_1.clock := clock sink_extend .clock := clock sink_valid .clock := clock source_valid_0.io.in := true.B source_valid_1.io.in := source_valid_0.io.out sio.widx_valid := source_valid_1.io.out sink_extend.io.in := sio.ridx_valid sink_valid.io.in := sink_extend.io.out sink_ready := sink_valid.io.out sio.source_reset_n := !reset.asBool // Assert that if there is stuff in the queue, then reset cannot happen // Impossible to write because dequeue can occur on the receiving side, // then reset allowed to happen, but write side cannot know that dequeue // occurred. 
// TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected") // assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty") } } class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSink_${gen.typeName}" val io = IO(new Bundle { // These come from the sink domain val deq = Decoupled(gen) // These cross to the source clock domain val async = Flipped(new AsyncBundle(gen, params)) }) val bits = params.bits val source_ready = WireInit(true.B) val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin")) val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray")) val valid = source_ready && ridx =/= widx // The mux is safe because timing analysis ensures ridx has reached the register // On an ASIC, changes to the unread location cannot affect the selected value // On an FPGA, only one input changes at a time => mem updates don't cause glitches // The register only latches when the selected valued is not being written val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1)) io.async.index.foreach { _ := index } // This register does not NEED to be reset, as its contents will not // be considered unless the asynchronously reset deq valid register is set. // It is possible that bits latches when the source domain is reset / has power cut // This is safe, because isolation gates brought mem low before the zeroed widx reached us val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index) io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg")) val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg")) io.deq.valid := valid_reg && source_ready val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray")) io.async.ridx := ridx_reg io.async.safe.foreach { sio => val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0")) val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1")) val source_extend = Module(new AsyncValidSync(params.sync, "source_extend")) val source_valid = Module(new AsyncValidSync(params.sync, "source_valid")) sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_valid .reset := reset.asAsyncReset sink_valid_0 .clock := clock sink_valid_1 .clock := clock source_extend.clock := clock source_valid .clock := clock sink_valid_0.io.in := true.B sink_valid_1.io.in := sink_valid_0.io.out sio.ridx_valid := sink_valid_1.io.out source_extend.io.in := sio.widx_valid source_valid.io.in := source_extend.io.out source_ready := source_valid.io.out sio.sink_reset_n := !reset.asBool // TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // // val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool // val reset_and_extend_prev = RegNext(reset_and_extend, true.B) // val reset_rise = !reset_and_extend_prev && reset_and_extend // val prev_idx_match = 
AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0) // assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty") } } object FromAsyncBundle { // Sometimes it makes sense for the sink to have different sync than the source def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync) def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = { val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync))) sink.io.async <> x sink.io.deq } } object ToAsyncBundle { def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = { val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params)) source.io.enq <> x source.io.async } } class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] { val io = IO(new CrossingIO(gen)) val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) } val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) } source.io.enq <> io.enq io.deq <> sink.io.deq sink.io.async <> source.io.async }
module AsyncValidSync_59( // @[AsyncQueue.scala:58:7] input io_in, // @[AsyncQueue.scala:59:14] output io_out, // @[AsyncQueue.scala:59:14] input clock, // @[AsyncQueue.scala:63:17] input reset // @[AsyncQueue.scala:64:17] ); wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7] wire _io_out_WIRE; // @[ShiftReg.scala:48:24] wire io_out_0; // @[AsyncQueue.scala:58:7] assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24] AsyncResetSynchronizerShiftReg_w1_d3_i0_69 io_out_sink_valid ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (reset), .io_d (io_in_0), // @[AsyncQueue.scala:58:7] .io_q (_io_out_WIRE) ); // @[ShiftReg.scala:45:23] assign io_out = io_out_0; // @[AsyncQueue.scala:58:7] endmodule
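Note: the AsyncQueue.scala source quoted above converts its binary write/read pointers to Gray code inside GrayCounter with incremented ^ (incremented >> 1), so that only one bit toggles per increment and the pointer can be sampled safely in the other clock domain (binary 0,1,2,3 maps to Gray 0,1,3,2). Below is a minimal illustrative Chisel sketch of that conversion only; the module and signal names are invented for the example and are not part of the generated output above.

import chisel3._

// Minimal sketch of the binary-to-Gray conversion used by GrayCounter.
// 'bits', 'inc' and 'gray' are example names, not taken from the source.
class GrayCounterExample(bits: Int) extends Module {
  val io = IO(new Bundle {
    val inc  = Input(Bool())
    val gray = Output(UInt(bits.W))
  })
  val binary = RegInit(0.U(bits.W))
  binary := binary + io.inc.asUInt   // plain binary counter
  io.gray := binary ^ (binary >> 1)  // Gray encoding: only one bit changes per step
}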
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_182( // @[SynchronizerReg.scala:68:19] input clock, // @[SynchronizerReg.scala:68:19] input reset, // @[SynchronizerReg.scala:68:19] output io_q // @[ShiftReg.scala:36:14] ); wire io_d = 1'h1; // @[SynchronizerReg.scala:54:22, :68:19] wire _sync_2_T = 1'h1; // @[SynchronizerReg.scala:54:22, :68:19] wire io_q_0; // @[SynchronizerReg.scala:68:19] reg sync_0; // @[SynchronizerReg.scala:51:87] assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19] reg sync_1; // @[SynchronizerReg.scala:51:87] reg sync_2; // @[SynchronizerReg.scala:51:87] always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19] if (reset) begin // @[SynchronizerReg.scala:68:19] sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87] sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87] sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87] end else begin // @[SynchronizerReg.scala:68:19] sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87] sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87] sync_2 <= 1'h1; // @[SynchronizerReg.scala:51:87, :54:22, :68:19] end end // always @(posedge, posedge) assign io_q = io_q_0; // @[SynchronizerReg.scala:68:19] endmodule
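Note: the AsyncResetSynchronizerPrimitiveShiftReg_d3_i0 module above is the per-bit primitive that AsyncResetSynchronizerShiftReg elaborates (sync = 3, init = 0, with the asynchronous reset forcing the chain to 0); in this particular instance the data input has been constant-folded to 1'h1 because the source drives true.B. A hedged usage sketch, using only the apply method quoted in SynchronizerReg.scala above; the wrapper module and signal names are illustrative:

import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

// Example-only wrapper: synchronize a 1-bit signal into this clock domain.
class SyncExample extends Module {
  val io = IO(new Bundle {
    val in  = Input(Bool())   // asynchronous to this clock domain
    val out = Output(Bool())  // synchronized copy, 3 flops deep
  })
  // sync = 3 and init = 0 correspond to the _d3_i0 primitive emitted above.
  io.out := AsyncResetSynchronizerShiftReg(io.in, sync = 3, init = 0, name = Some("in_sync"))
}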
Generate the Verilog code corresponding to the following Chisel files. File ResetCatchAndSync.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.{withClockAndReset, withReset} /** Reset: asynchronous assert, * synchronous de-assert * */ class ResetCatchAndSync (sync: Int = 3) extends Module { override def desiredName = s"ResetCatchAndSync_d${sync}" val io = IO(new Bundle { val sync_reset = Output(Bool()) val psd = Input(new PSDTestMode()) }) // Bypass both the resets to the flops themselves (to prevent DFT holes on // those flops) and on the output of the synchronizer circuit (to control // reset to any flops this circuit drives). val post_psd_reset = Mux(io.psd.test_mode, io.psd.test_mode_reset, reset.asBool) withReset(post_psd_reset) { io.sync_reset := Mux(io.psd.test_mode, io.psd.test_mode_reset, ~AsyncResetSynchronizerShiftReg(true.B, sync)) } } object ResetCatchAndSync { def apply(clk: Clock, rst: Bool, sync: Int = 3, name: Option[String] = None, psd: Option[PSDTestMode] = None): Bool = { withClockAndReset(clk, rst) { val catcher = Module (new ResetCatchAndSync(sync)) if (name.isDefined) {catcher.suggestName(name.get)} catcher.io.psd <> psd.getOrElse(WireDefault(0.U.asTypeOf(new PSDTestMode()))) catcher.io.sync_reset } } def apply(clk: Clock, rst: Bool, sync: Int, name: String): Bool = apply(clk, rst, sync, Some(name)) def apply(clk: Clock, rst: Bool, name: String): Bool = apply(clk, rst, name = Some(name)) def apply(clk: Clock, rst: Bool, sync: Int, name: String, psd: PSDTestMode): Bool = apply(clk, rst, sync, Some(name), Some(psd)) def apply(clk: Clock, rst: Bool, name: String, psd: PSDTestMode): Bool = apply(clk, rst, name = Some(name), psd = Some(psd)) } File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. 
Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) }
module ResetCatchAndSync_d3_1( // @[ResetCatchAndSync.scala:13:7] input clock, // @[ResetCatchAndSync.scala:13:7] input reset, // @[ResetCatchAndSync.scala:13:7] output io_sync_reset // @[ResetCatchAndSync.scala:17:14] ); wire _post_psd_reset_T = reset; // @[ResetCatchAndSync.scala:26:76] wire io_psd_test_mode = 1'h0; // @[ResetCatchAndSync.scala:13:7, :17:14] wire io_psd_test_mode_reset = 1'h0; // @[ResetCatchAndSync.scala:13:7, :17:14] wire _io_sync_reset_T_1; // @[ResetCatchAndSync.scala:28:25] wire io_sync_reset_0; // @[ResetCatchAndSync.scala:13:7] wire post_psd_reset = _post_psd_reset_T; // @[ResetCatchAndSync.scala:26:{27,76}] wire _io_sync_reset_WIRE; // @[ShiftReg.scala:48:24] wire _io_sync_reset_T = ~_io_sync_reset_WIRE; // @[ShiftReg.scala:48:24] assign _io_sync_reset_T_1 = _io_sync_reset_T; // @[ResetCatchAndSync.scala:28:25, :29:7] assign io_sync_reset_0 = _io_sync_reset_T_1; // @[ResetCatchAndSync.scala:13:7, :28:25] AsyncResetSynchronizerShiftReg_w1_d3_i0_114 io_sync_reset_chain ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (post_psd_reset), // @[ResetCatchAndSync.scala:26:27] .io_q (_io_sync_reset_WIRE) ); // @[ShiftReg.scala:45:23] assign io_sync_reset = io_sync_reset_0; // @[ResetCatchAndSync.scala:13:7] endmodule
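Note: ResetCatchAndSync_d3_1 above implements the async-assert / sync-deassert reset described in ResetCatchAndSync.scala (the synchronizer chain is reset asynchronously and its inverted output forms sync_reset, which deasserts only after the chain fills with ones). A hedged usage sketch based on the companion-object apply quoted above; the wrapper module and the name string are illustrative only:

import chisel3._
import freechips.rocketchip.util.ResetCatchAndSync

// Example-only wrapper: catch a raw reset request and release it synchronously.
class ResetCatchExample extends Module {
  val io = IO(new Bundle {
    val raw_reset  = Input(Bool())   // may assert at any time
    val sync_reset = Output(Bool())  // asserts asynchronously, deasserts synchronously to 'clock'
  })
  io.sync_reset := ResetCatchAndSync(clock, io.raw_reset, 3, "example_reset_catcher")
}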
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra)
      monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
      monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
      monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
      monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
    }
    when (bundle.opcode === TLMessages.ReleaseData) {
      monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
      monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData from a client which does not support Probe" + extra)
      monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
      monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
      monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
      monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
    }
    when (bundle.opcode === TLMessages.AccessAck) {
      monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
      monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
      monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
      monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
    }
    when (bundle.opcode === TLMessages.AccessAckData) {
      monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
      monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
      monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
    }
    when (bundle.opcode === TLMessages.HintAck) {
      monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
      monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
      monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
      monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
    }
  }

  def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
    assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
    val source_ok = edge.client.contains(bundle.source)
    val sink_ok = bundle.sink < edge.manager.endSinkId.U
    val deny_put_ok = edge.manager.mayDenyPut.B
    val deny_get_ok = edge.manager.mayDenyGet.B
    when (bundle.opcode === TLMessages.ReleaseAck) {
      assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
      assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
      assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
      assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
      assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
    }
    when (bundle.opcode === TLMessages.Grant) {
      assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
      assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
      assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
      assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
      assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
      assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
      assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
    }
    when (bundle.opcode === TLMessages.GrantData) {
      assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
      assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
      assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
      assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
      assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
      assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
      assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
    }
    when (bundle.opcode === TLMessages.AccessAck) {
      assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
      // size is ignored
      assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
      assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
      assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
    }
    when (bundle.opcode === TLMessages.AccessAckData) {
      assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
      // size is ignored
      assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
      assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
      assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
    }
    when (bundle.opcode === TLMessages.HintAck) {
      assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
      // size is ignored
      assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
      assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
      assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
    }
  }

  def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
    val sink_ok = bundle.sink < edge.manager.endSinkId.U
    monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
  }

  def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
    when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
    when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
    if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
      when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
      when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
      when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
    } else {
      monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
      monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
      monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
    }
  }

  def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
    val a_first = edge.first(a.bits, a.fire)
    val opcode = Reg(UInt())
    val param = Reg(UInt())
    val size = Reg(UInt())
    val source = Reg(UInt())
    val address = Reg(UInt())
    when (a.valid && !a_first) {
      monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
      monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
      monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
      monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
      monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
    }
    when (a.fire && a_first) {
      opcode := a.bits.opcode
      param := a.bits.param
      size := a.bits.size
      source := a.bits.source
      address := a.bits.address
    }
  }

  def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
    val b_first = edge.first(b.bits, b.fire)
    val opcode = Reg(UInt())
    val param = Reg(UInt())
    val size = Reg(UInt())
    val source = Reg(UInt())
    val address = Reg(UInt())
    when (b.valid && !b_first) {
      monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
      monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
      monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
      monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
      monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
    }
    when (b.fire && b_first) {
      opcode := b.bits.opcode
      param := b.bits.param
      size := b.bits.size
      source := b.bits.source
      address := b.bits.address
    }
  }

  def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
    // Symbolic variable
    val sym_source = Wire(UInt(edge.client.endSourceId.W))
    // TODO: Connect sym_source to a fixed value for simulation and to a
    // free wire in formal
    sym_source := 0.U
    // Type casting Int to UInt
    val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
    maxSourceId := edge.client.endSourceId.U
    // Delayed version of sym_source
    val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
    sym_source_d := sym_source
    // These will be constraints for FV setup
    Property(MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default)
    Property(MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default)
    val my_resp_pend = RegInit(false.B)
    val my_opcode = Reg(UInt())
    val my_size = Reg(UInt())
    val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
    val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
    val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
    val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
    val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
    val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
    when (my_set_resp_pend) {
      my_resp_pend := true.B
    } .elsewhen (my_clr_resp_pend) {
      my_resp_pend := false.B
    }
    when (my_a_first_beat) {
      my_opcode := bundle.a.bits.opcode
      my_size := bundle.a.bits.size
    }
    val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
    val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
    val my_resp_opcode_legal = Wire(Bool())
    when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) {
      my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
    } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
      my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
    } .otherwise {
      my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
    }
    monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message " + "is already pending (not received until current cycle) for a prior request message " + "with the same source ID" + extra)
    assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the " + "same source ID has been accepted or is being accepted in the current cycle" + extra)
    assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the " + "same source ID has been accepted or is being sent in the current cycle" + extra)
    assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request " + "message" + extra)
    assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding " + "request message" + extra)
  }

  def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
    val c_first = edge.first(c.bits, c.fire)
    val opcode = Reg(UInt())
    val param = Reg(UInt())
    val size = Reg(UInt())
    val source = Reg(UInt())
    val address = Reg(UInt())
    when (c.valid && !c_first) {
      monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
      monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
      monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
      monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
      monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
    }
    when (c.fire && c_first) {
      opcode := c.bits.opcode
      param := c.bits.param
      size := c.bits.size
      source := c.bits.source
      address := c.bits.address
    }
  }

  def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
    val d_first = edge.first(d.bits, d.fire)
    val opcode = Reg(UInt())
    val param = Reg(UInt())
    val size = Reg(UInt())
    val source = Reg(UInt())
    val sink = Reg(UInt())
    val denied = Reg(Bool())
    when (d.valid && !d_first) {
      assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
      assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
      assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
      assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
      assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
      assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
    }
    when (d.fire && d_first) {
      opcode := d.bits.opcode
      param := d.bits.param
      size := d.bits.size
      source := d.bits.source
      sink := d.bits.sink
      denied := d.bits.denied
    }
  }

  def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
    legalizeMultibeatA(bundle.a, edge)
    legalizeMultibeatD(bundle.d, edge)
    if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
      legalizeMultibeatB(bundle.b, edge)
      legalizeMultibeatC(bundle.c, edge)
    }
  }

  // This is left in for almond which doesn't adhere to the tilelink protocol
  @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
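// Added explanatory comment (not in the original source): a worked example of the in-flight
// encoding maintained here. With a_opcode_bus_size = 4, a Get (opcode 4) accepted from
// source 2 stores (4 << 1) | 1 = 0b1001 into bits [11:8] of inflight_opcodes; the low-order
// 1 marks the slot as occupied, and a_opcode_lookup later recovers (0b1001 & 0xF) >> 1 = 4
// when the matching 'D' beat arrives. inflight_sizes follows the same scheme with
// a_size_bus_size-wide slots.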
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
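      // Added note: the fold below builds a log2(width)-stage barrel rotator; stage i
      // conditionally rotates by 2^i when bit i of the zero-extended amount is set.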
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Bundles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import freechips.rocketchip.util._ import scala.collection.immutable.ListMap import chisel3.util.Decoupled import chisel3.util.DecoupledIO import chisel3.reflect.DataMirror abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle // common combos in lazy policy: // Put + Acquire // Release + AccessAck object TLMessages { // A B C D E def PutFullData = 0.U // . . => AccessAck def PutPartialData = 1.U // . . => AccessAck def ArithmeticData = 2.U // . . => AccessAckData def LogicalData = 3.U // . . => AccessAckData def Get = 4.U // . . => AccessAckData def Hint = 5.U // . . => HintAck def AcquireBlock = 6.U // . => Grant[Data] def AcquirePerm = 7.U // . => Grant[Data] def Probe = 6.U // . => ProbeAck[Data] def AccessAck = 0.U // . . def AccessAckData = 1.U // . . def HintAck = 2.U // . . def ProbeAck = 4.U // . def ProbeAckData = 5.U // . def Release = 6.U // . => ReleaseAck def ReleaseData = 7.U // . => ReleaseAck def Grant = 4.U // . => GrantAck def GrantData = 5.U // . => GrantAck def ReleaseAck = 6.U // . def GrantAck = 0.U // . def isA(x: UInt) = x <= AcquirePerm def isB(x: UInt) = x <= Probe def isC(x: UInt) = x <= ReleaseData def isD(x: UInt) = x <= ReleaseAck def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant) def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck) def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("AcquireBlock",TLPermissions.PermMsgGrow), ("AcquirePerm",TLPermissions.PermMsgGrow)) def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("Probe",TLPermissions.PermMsgCap)) def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("ProbeAck",TLPermissions.PermMsgReport), ("ProbeAckData",TLPermissions.PermMsgReport), ("Release",TLPermissions.PermMsgReport), ("ReleaseData",TLPermissions.PermMsgReport)) def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("Grant",TLPermissions.PermMsgCap), ("GrantData",TLPermissions.PermMsgCap), ("ReleaseAck",TLPermissions.PermMsgReserved)) } /** * The three primary TileLink permissions are: * (T)runk: the agent is (or is on inwards path to) the global point of 
serialization. * (B)ranch: the agent is on an outwards path to * (N)one: * These permissions are permuted by transfer operations in various ways. * Operations can cap permissions, request for them to be grown or shrunk, * or for a report on their current status. */ object TLPermissions { val aWidth = 2 val bdWidth = 2 val cWidth = 3 // Cap types (Grant = new permissions, Probe = permisions <= target) def toT = 0.U(bdWidth.W) def toB = 1.U(bdWidth.W) def toN = 2.U(bdWidth.W) def isCap(x: UInt) = x <= toN // Grow types (Acquire = permissions >= target) def NtoB = 0.U(aWidth.W) def NtoT = 1.U(aWidth.W) def BtoT = 2.U(aWidth.W) def isGrow(x: UInt) = x <= BtoT // Shrink types (ProbeAck, Release) def TtoB = 0.U(cWidth.W) def TtoN = 1.U(cWidth.W) def BtoN = 2.U(cWidth.W) def isShrink(x: UInt) = x <= BtoN // Report types (ProbeAck, Release) def TtoT = 3.U(cWidth.W) def BtoB = 4.U(cWidth.W) def NtoN = 5.U(cWidth.W) def isReport(x: UInt) = x <= NtoN def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT") def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN") def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TotT", "Report BtoB", "Report NtoN") def PermMsgReserved:Seq[String] = Seq("Reserved") } object TLAtomics { val width = 3 // Arithmetic types def MIN = 0.U(width.W) def MAX = 1.U(width.W) def MINU = 2.U(width.W) def MAXU = 3.U(width.W) def ADD = 4.U(width.W) def isArithmetic(x: UInt) = x <= ADD // Logical types def XOR = 0.U(width.W) def OR = 1.U(width.W) def AND = 2.U(width.W) def SWAP = 3.U(width.W) def isLogical(x: UInt) = x <= SWAP def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD") def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP") } object TLHints { val width = 1 def PREFETCH_READ = 0.U(width.W) def PREFETCH_WRITE = 1.U(width.W) def isHints(x: UInt) = x <= PREFETCH_WRITE def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite") } sealed trait TLChannel extends TLBundleBase { val channelName: String } sealed trait TLDataChannel extends TLChannel sealed trait TLAddrChannel extends TLDataChannel final class TLBundleA(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleA_${params.shortName}" val channelName = "'A' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleB(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleB_${params.shortName}" val channelName = "'B' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val address = UInt(params.addressBits.W) // from // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleC(params: TLBundleParameters) extends TLBundleBase(params) with 
TLAddrChannel { override def typeName = s"TLBundleC_${params.shortName}" val channelName = "'C' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.cWidth.W) // shrink or report perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleD(params: TLBundleParameters) extends TLBundleBase(params) with TLDataChannel { override def typeName = s"TLBundleD_${params.shortName}" val channelName = "'D' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val sink = UInt(params.sinkBits.W) // from val denied = Bool() // implies corrupt iff *Data val user = BundleMap(params.responseFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleE(params: TLBundleParameters) extends TLBundleBase(params) with TLChannel { override def typeName = s"TLBundleE_${params.shortName}" val channelName = "'E' channel" val sink = UInt(params.sinkBits.W) // to } class TLBundle(val params: TLBundleParameters) extends Record { // Emulate a Bundle with elements abcde or ad depending on params.hasBCE private val optA = Some (Decoupled(new TLBundleA(params))) private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params)))) private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params))) private val optD = Some (Flipped(Decoupled(new TLBundleD(params)))) private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params))) def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params))))) def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params))))) def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params))))) def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params))))) def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params))))) val elements = if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a) else ListMap("d" -> d, "a" -> a) def tieoff(): Unit = { DataMirror.specifiedDirectionOf(a.ready) match { case SpecifiedDirection.Input => a.ready := false.B c.ready := false.B e.ready := false.B b.valid := false.B d.valid := false.B case SpecifiedDirection.Output => a.valid := false.B c.valid := false.B e.valid := false.B b.ready := false.B d.ready := false.B case _ => } } } object TLBundle { def apply(params: TLBundleParameters) = new TLBundle(params) } class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params) { val a = new AsyncBundle(new TLBundleA(params.base), params.async) val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async)) val c = new AsyncBundle(new TLBundleC(params.base), params.async) val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async)) val e = new AsyncBundle(new 
TLBundleE(params.base), params.async) } class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = RationalIO(new TLBundleA(params)) val b = Flipped(RationalIO(new TLBundleB(params))) val c = RationalIO(new TLBundleC(params)) val d = Flipped(RationalIO(new TLBundleD(params))) val e = RationalIO(new TLBundleE(params)) } class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = CreditedIO(new TLBundleA(params)) val b = Flipped(CreditedIO(new TLBundleB(params))) val c = CreditedIO(new TLBundleC(params)) val d = Flipped(CreditedIO(new TLBundleD(params))) val e = CreditedIO(new TLBundleE(params)) } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // An potentially empty inclusive range of 2-powers 
[min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max, s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask are the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ... 
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there exist C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there exist D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => 
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := 
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
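For illustration only (not one of the original source files): a minimal, hedged Scala/Chisel sketch of how the TLEdgeOut helpers defined above (Get, firstlast) are typically exercised from a diplomatic client node. The module name ExampleGetClient, the address and size constants, and the single-source configuration are hypothetical, and the import paths assume a recent rocket-chip layout, which varies between versions.

import chisel3._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._

class ExampleGetClient(implicit p: Parameters) extends LazyModule {
  // One client with a single source ID; diplomacy negotiates the edge parameters.
  val node = TLClientNode(Seq(TLMasterPortParameters.v1(
    Seq(TLMasterParameters.v1(name = "example-get", sourceId = IdRange(0, 1))))))

  lazy val module = new LazyModuleImp(this) {
    val (tl, edge) = node.out(0)

    // Build an 8-byte Get (lgSize = 3); edge.Get returns (legal, bits), where
    // `legal` comes from the managers' supportsGetFast address decode.
    val (legal, get) = edge.Get(fromSource = 0.U, toAddress = 0x1000.U, lgSize = 3.U)
    tl.a.valid := legal
    tl.a.bits  := get

    // Accept all responses and observe beat boundaries computed by firstlastHelper.
    tl.d.ready := true.B
    val (d_first, d_last, d_done) = edge.firstlast(tl.d)

    // Tie off the unused B/C/E channels of a non-caching client so elaboration passes.
    tl.b.ready := true.B
    tl.c.valid := false.B
    tl.c.bits  := DontCare
    tl.e.valid := false.B
    tl.e.bits  := DontCare
  }
}

In a real client the A channel would be driven by a state machine rather than held valid whenever the address decode is legal; the sketch only shows the request-constructor and beat-tracking API surface that monitors such as the generated module below also rely on.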
module TLMonitor_47( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [1:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [12:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [25:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [12:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input [63:0] io_in_d_bits_data // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7] wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7] wire [1:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7] wire [12:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7] wire [25:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7] wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7] wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7] wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7] wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7] wire [1:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7] wire [12:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7] wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7] wire io_in_d_bits_sink = 1'h0; // @[Monitor.scala:36:7] wire io_in_d_bits_denied = 1'h0; // @[Monitor.scala:36:7] wire io_in_d_bits_corrupt = 1'h0; // @[Monitor.scala:36:7] wire _source_ok_T = 1'h0; // @[Parameters.scala:54:10] wire _source_ok_T_6 = 1'h0; // @[Parameters.scala:54:10] wire sink_ok = 1'h0; // @[Monitor.scala:309:31] wire a_first_beats1_decode = 1'h0; // @[Edges.scala:220:59] wire a_first_beats1 = 1'h0; // @[Edges.scala:221:14] wire a_first_count = 1'h0; // @[Edges.scala:234:25] wire d_first_beats1_decode = 1'h0; // @[Edges.scala:220:59] wire d_first_beats1 = 1'h0; // @[Edges.scala:221:14] wire d_first_count = 1'h0; // @[Edges.scala:234:25] wire a_first_beats1_decode_1 = 1'h0; // @[Edges.scala:220:59] wire a_first_beats1_1 = 1'h0; // @[Edges.scala:221:14] wire a_first_count_1 = 1'h0; // @[Edges.scala:234:25] wire d_first_beats1_decode_1 = 1'h0; // @[Edges.scala:220:59] wire d_first_beats1_1 = 1'h0; // @[Edges.scala:221:14] wire d_first_count_1 = 1'h0; // @[Edges.scala:234:25] wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_1_ready 
= 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35] wire c_first_beats1_decode = 1'h0; // @[Edges.scala:220:59] wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36] wire c_first_beats1 = 1'h0; // @[Edges.scala:221:14] wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25] wire c_first_done = 1'h0; // @[Edges.scala:233:22] wire _c_first_count_T = 1'h0; // @[Edges.scala:234:27] wire c_first_count = 1'h0; // @[Edges.scala:234:25] wire _c_first_counter_T = 1'h0; // @[Edges.scala:236:21] wire d_first_beats1_decode_2 = 1'h0; // @[Edges.scala:220:59] wire d_first_beats1_2 = 1'h0; // @[Edges.scala:221:14] wire d_first_count_2 = 1'h0; // @[Edges.scala:234:25] wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire 
_c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47] wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95] wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71] wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44] wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36] wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51] wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40] wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55] wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88] wire _source_ok_T_1 = 1'h1; // @[Parameters.scala:54:32] wire _source_ok_T_2 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:54:67] wire _source_ok_T_7 = 1'h1; // @[Parameters.scala:54:32] wire _source_ok_T_8 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:54:67] wire _a_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire a_first_last = 1'h1; // @[Edges.scala:232:33] wire _d_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire d_first_last = 1'h1; // @[Edges.scala:232:33] wire _a_first_last_T_3 = 1'h1; // @[Edges.scala:232:43] wire a_first_last_1 = 1'h1; // @[Edges.scala:232:33] wire _d_first_last_T_3 = 1'h1; // @[Edges.scala:232:43] wire 
d_first_last_1 = 1'h1; // @[Edges.scala:232:33] wire c_first_counter1 = 1'h1; // @[Edges.scala:230:28] wire c_first = 1'h1; // @[Edges.scala:231:25] wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire c_first_last = 1'h1; // @[Edges.scala:232:33] wire _d_first_last_T_5 = 1'h1; // @[Edges.scala:232:43] wire d_first_last_2 = 1'h1; // @[Edges.scala:232:33] wire [1:0] _c_first_counter1_T = 2'h3; // @[Edges.scala:230:28] wire [1:0] io_in_d_bits_param = 2'h0; // @[Monitor.scala:36:7] wire [1:0] _c_first_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_first_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_first_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_first_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_set_wo_ready_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_set_wo_ready_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_opcodes_set_interm_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_opcodes_set_interm_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_sizes_set_interm_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_sizes_set_interm_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_opcodes_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_opcodes_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_sizes_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_sizes_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_probe_ack_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_probe_ack_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_probe_ack_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_probe_ack_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _same_cycle_resp_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _same_cycle_resp_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _same_cycle_resp_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _same_cycle_resp_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _same_cycle_resp_WIRE_4_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _same_cycle_resp_WIRE_5_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; 
// @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [25:0] _c_first_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _c_first_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [25:0] _c_first_WIRE_2_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _c_first_WIRE_3_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [25:0] _c_set_wo_ready_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _c_set_wo_ready_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [25:0] _c_set_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _c_set_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [25:0] _c_opcodes_set_interm_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _c_opcodes_set_interm_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [25:0] _c_sizes_set_interm_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _c_sizes_set_interm_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [25:0] _c_opcodes_set_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _c_opcodes_set_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [25:0] _c_sizes_set_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _c_sizes_set_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [25:0] _c_probe_ack_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _c_probe_ack_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [25:0] _c_probe_ack_WIRE_2_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _c_probe_ack_WIRE_3_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [25:0] _same_cycle_resp_WIRE_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _same_cycle_resp_WIRE_1_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [25:0] _same_cycle_resp_WIRE_2_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _same_cycle_resp_WIRE_3_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [25:0] _same_cycle_resp_WIRE_4_bits_address = 26'h0; // @[Bundles.scala:265:74] wire [25:0] _same_cycle_resp_WIRE_5_bits_address = 26'h0; // @[Bundles.scala:265:61] wire [12:0] _c_first_WIRE_bits_source = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _c_first_WIRE_1_bits_source = 13'h0; // @[Bundles.scala:265:61] wire [12:0] _c_first_WIRE_2_bits_source = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _c_first_WIRE_3_bits_source = 13'h0; // @[Bundles.scala:265:61] wire [12:0] _c_set_wo_ready_WIRE_bits_source = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _c_set_wo_ready_WIRE_1_bits_source = 13'h0; // 
@[Bundles.scala:265:61] wire [12:0] _c_set_WIRE_bits_source = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _c_set_WIRE_1_bits_source = 13'h0; // @[Bundles.scala:265:61] wire [12:0] _c_opcodes_set_interm_WIRE_bits_source = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _c_opcodes_set_interm_WIRE_1_bits_source = 13'h0; // @[Bundles.scala:265:61] wire [12:0] _c_sizes_set_interm_WIRE_bits_source = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _c_sizes_set_interm_WIRE_1_bits_source = 13'h0; // @[Bundles.scala:265:61] wire [12:0] _c_opcodes_set_WIRE_bits_source = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _c_opcodes_set_WIRE_1_bits_source = 13'h0; // @[Bundles.scala:265:61] wire [12:0] _c_sizes_set_WIRE_bits_source = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _c_sizes_set_WIRE_1_bits_source = 13'h0; // @[Bundles.scala:265:61] wire [12:0] _c_probe_ack_WIRE_bits_source = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _c_probe_ack_WIRE_1_bits_source = 13'h0; // @[Bundles.scala:265:61] wire [12:0] _c_probe_ack_WIRE_2_bits_source = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _c_probe_ack_WIRE_3_bits_source = 13'h0; // @[Bundles.scala:265:61] wire [12:0] _same_cycle_resp_WIRE_bits_source = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _same_cycle_resp_WIRE_1_bits_source = 13'h0; // @[Bundles.scala:265:61] wire [12:0] _same_cycle_resp_WIRE_2_bits_source = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _same_cycle_resp_WIRE_3_bits_source = 13'h0; // @[Bundles.scala:265:61] wire [12:0] _same_cycle_resp_WIRE_4_bits_source = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _same_cycle_resp_WIRE_5_bits_source = 13'h0; // @[Bundles.scala:265:61] wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_beats1_decode_T_2 = 3'h0; // @[package.scala:243:46] wire [2:0] c_sizes_set_interm = 3'h0; // @[Monitor.scala:755:40] wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 
3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_T = 3'h0; // @[Monitor.scala:766:51] wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _c_size_lookup_T_4 = 
17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [65537:0] _c_sizes_set_T_1 = 65538'h0; // @[Monitor.scala:768:52] wire [15:0] _c_opcodes_set_T = 16'h0; // @[Monitor.scala:767:79] wire [15:0] _c_sizes_set_T = 16'h0; // @[Monitor.scala:768:77] wire [65538:0] _c_opcodes_set_T_1 = 65539'h0; // @[Monitor.scala:767:54] wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] _c_sizes_set_interm_T_1 = 3'h1; // @[Monitor.scala:766:59] wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61] wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40] wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53] wire [8191:0] _c_set_wo_ready_T = 8192'h1; // @[OneHot.scala:58:35] wire [8191:0] _c_set_T = 8192'h1; // @[OneHot.scala:58:35] wire [16447:0] c_opcodes_set = 16448'h0; // @[Monitor.scala:740:34] wire [16447:0] c_sizes_set = 16448'h0; // @[Monitor.scala:741:34] wire [4111:0] c_set = 4112'h0; // @[Monitor.scala:738:34] wire [4111:0] c_set_wo_ready = 4112'h0; // @[Monitor.scala:739:34] wire [2:0] _c_first_beats1_decode_T_1 = 3'h7; // @[package.scala:243:76] wire [5:0] _c_first_beats1_decode_T = 6'h7; // @[package.scala:243:71] wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42] wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42] wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123] wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117] wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48] wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48] wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123] wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119] wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48] wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48] wire [12:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [12:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [12:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [12:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [12:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [12:0] _uncommonBits_T_4 = 
io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [12:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [12:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [12:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [12:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [12:0] _source_ok_uncommonBits_T_1 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [12:0] source_ok_uncommonBits = _source_ok_uncommonBits_T; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_4 = source_ok_uncommonBits < 13'h1010; // @[Parameters.scala:52:56, :57:20] wire _source_ok_T_5 = _source_ok_T_4; // @[Parameters.scala:56:48, :57:20] wire _source_ok_WIRE_0 = _source_ok_T_5; // @[Parameters.scala:1138:31] wire [5:0] _GEN = 6'h7 << io_in_a_bits_size_0; // @[package.scala:243:71] wire [5:0] _is_aligned_mask_T; // @[package.scala:243:71] assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71] wire [5:0] _a_first_beats1_decode_T; // @[package.scala:243:71] assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71] wire [5:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71] assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71] wire [2:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[2:0]; // @[package.scala:243:{71,76}] wire [2:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}] wire [25:0] _is_aligned_T = {23'h0, io_in_a_bits_address_0[2:0] & is_aligned_mask}; // @[package.scala:243:46] wire is_aligned = _is_aligned_T == 26'h0; // @[Edges.scala:21:{16,24}] wire [2:0] _mask_sizeOH_T = {1'h0, io_in_a_bits_size_0}; // @[Misc.scala:202:34] wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49] wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}] wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27] wire mask_sub_sub_sub_0_1 = &io_in_a_bits_size_0; // @[Misc.scala:206:21] wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26] wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26] wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}] wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}] wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26] wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26] wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}] wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // 
@[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26] wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26] wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20] wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}] wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}] wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}] wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}] wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}] wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10] wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10] wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10] wire [12:0] uncommonBits = _uncommonBits_T; // @[Parameters.scala:52:{29,56}] wire [12:0] uncommonBits_1 = _uncommonBits_T_1; // @[Parameters.scala:52:{29,56}] wire [12:0] uncommonBits_2 = _uncommonBits_T_2; // @[Parameters.scala:52:{29,56}] wire [12:0] 
uncommonBits_3 = _uncommonBits_T_3; // @[Parameters.scala:52:{29,56}] wire [12:0] uncommonBits_4 = _uncommonBits_T_4; // @[Parameters.scala:52:{29,56}] wire [12:0] uncommonBits_5 = _uncommonBits_T_5; // @[Parameters.scala:52:{29,56}] wire [12:0] uncommonBits_6 = _uncommonBits_T_6; // @[Parameters.scala:52:{29,56}] wire [12:0] uncommonBits_7 = _uncommonBits_T_7; // @[Parameters.scala:52:{29,56}] wire [12:0] uncommonBits_8 = _uncommonBits_T_8; // @[Parameters.scala:52:{29,56}] wire [12:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_10 = source_ok_uncommonBits_1 < 13'h1010; // @[Parameters.scala:52:56, :57:20] wire _source_ok_T_11 = _source_ok_T_10; // @[Parameters.scala:56:48, :57:20] wire _source_ok_WIRE_1_0 = _source_ok_T_11; // @[Parameters.scala:1138:31] wire _T_665 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35] wire _a_first_T; // @[Decoupled.scala:51:35] assign _a_first_T = _T_665; // @[Decoupled.scala:51:35] wire _a_first_T_1; // @[Decoupled.scala:51:35] assign _a_first_T_1 = _T_665; // @[Decoupled.scala:51:35] wire a_first_done = _a_first_T; // @[Decoupled.scala:51:35] wire [2:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[2:0]; // @[package.scala:243:{71,76}] wire [2:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}] reg a_first_counter; // @[Edges.scala:229:27] wire _a_first_last_T = a_first_counter; // @[Edges.scala:229:27, :232:25] wire [1:0] _a_first_counter1_T = {1'h0, a_first_counter} - 2'h1; // @[Edges.scala:229:27, :230:28] wire a_first_counter1 = _a_first_counter1_T[0]; // @[Edges.scala:230:28] wire a_first = ~a_first_counter; // @[Edges.scala:229:27, :231:25] wire _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27] wire _a_first_counter_T = ~a_first & a_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [1:0] size; // @[Monitor.scala:389:22] reg [12:0] source; // @[Monitor.scala:390:22] reg [25:0] address; // @[Monitor.scala:391:22] wire _T_733 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35] wire _d_first_T; // @[Decoupled.scala:51:35] assign _d_first_T = _T_733; // @[Decoupled.scala:51:35] wire _d_first_T_1; // @[Decoupled.scala:51:35] assign _d_first_T_1 = _T_733; // @[Decoupled.scala:51:35] wire _d_first_T_2; // @[Decoupled.scala:51:35] assign _d_first_T_2 = _T_733; // @[Decoupled.scala:51:35] wire d_first_done = _d_first_T; // @[Decoupled.scala:51:35] wire [5:0] _GEN_0 = 6'h7 << io_in_d_bits_size_0; // @[package.scala:243:71] wire [5:0] _d_first_beats1_decode_T; // @[package.scala:243:71] assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71] wire [5:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71] assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71] wire [5:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71] assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71] wire [2:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[2:0]; // @[package.scala:243:{71,76}] wire [2:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire d_first_beats1_opdata = 
io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] reg d_first_counter; // @[Edges.scala:229:27] wire _d_first_last_T = d_first_counter; // @[Edges.scala:229:27, :232:25] wire [1:0] _d_first_counter1_T = {1'h0, d_first_counter} - 2'h1; // @[Edges.scala:229:27, :230:28] wire d_first_counter1 = _d_first_counter1_T[0]; // @[Edges.scala:230:28] wire d_first = ~d_first_counter; // @[Edges.scala:229:27, :231:25] wire _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27] wire _d_first_counter_T = ~d_first & d_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] size_1; // @[Monitor.scala:540:22] reg [12:0] source_1; // @[Monitor.scala:541:22] reg [4111:0] inflight; // @[Monitor.scala:614:27] reg [16447:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [16447:0] inflight_sizes; // @[Monitor.scala:618:33] wire a_first_done_1 = _a_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[2:0]; // @[package.scala:243:{71,76}] wire [2:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}] reg a_first_counter_1; // @[Edges.scala:229:27] wire _a_first_last_T_2 = a_first_counter_1; // @[Edges.scala:229:27, :232:25] wire [1:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 2'h1; // @[Edges.scala:229:27, :230:28] wire a_first_counter1_1 = _a_first_counter1_T_1[0]; // @[Edges.scala:230:28] wire a_first_1 = ~a_first_counter_1; // @[Edges.scala:229:27, :231:25] wire _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire _a_first_counter_T_1 = ~a_first_1 & a_first_counter1_1; // @[Edges.scala:230:28, :231:25, :236:21] wire d_first_done_1 = _d_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[2:0]; // @[package.scala:243:{71,76}] wire [2:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] reg d_first_counter_1; // @[Edges.scala:229:27] wire _d_first_last_T_2 = d_first_counter_1; // @[Edges.scala:229:27, :232:25] wire [1:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 2'h1; // @[Edges.scala:229:27, :230:28] wire d_first_counter1_1 = _d_first_counter1_T_1[0]; // @[Edges.scala:230:28] wire d_first_1 = ~d_first_counter_1; // @[Edges.scala:229:27, :231:25] wire _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire _d_first_counter_T_1 = ~d_first_1 & d_first_counter1_1; // @[Edges.scala:230:28, :231:25, :236:21] wire [4111:0] a_set; // @[Monitor.scala:626:34] wire [4111:0] a_set_wo_ready; // @[Monitor.scala:627:34] wire [16447:0] a_opcodes_set; // @[Monitor.scala:630:33] wire [16447:0] a_sizes_set; // @[Monitor.scala:632:31] wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35] wire [15:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69] wire [15:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69] assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69] wire [15:0] _a_size_lookup_T; // @[Monitor.scala:641:65] assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65] wire [15:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101] assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101] 
wire [15:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99] assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99] wire [15:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69] assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69] wire [15:0] _c_size_lookup_T; // @[Monitor.scala:750:67] assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67] wire [15:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101] assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101] wire [15:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99] assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99] wire [16447:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}] wire [16447:0] _a_opcode_lookup_T_6 = {16444'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}] wire [16447:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[16447:1]}; // @[Monitor.scala:637:{97,152}] assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}] wire [3:0] a_size_lookup; // @[Monitor.scala:639:33] wire [16447:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}] wire [16447:0] _a_size_lookup_T_6 = {16444'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}] wire [16447:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[16447:1]}; // @[Monitor.scala:641:{91,144}] assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}] wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40] wire [2:0] a_sizes_set_interm; // @[Monitor.scala:648:38] wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44] wire [8191:0] _GEN_2 = 8192'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35] wire [8191:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35] assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35] wire [8191:0] _a_set_T; // @[OneHot.scala:58:35] assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35] assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[4111:0] : 4112'h0; // @[OneHot.scala:58:35] wire _T_598 = _T_665 & a_first_1; // @[Decoupled.scala:51:35] assign a_set = _T_598 ? _a_set_T[4111:0] : 4112'h0; // @[OneHot.scala:58:35] wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53] wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}] assign a_opcodes_set_interm = _T_598 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}] wire [2:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51] wire [2:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[2:1], 1'h1}; // @[Monitor.scala:658:{51,59}] assign a_sizes_set_interm = _T_598 ? _a_sizes_set_interm_T_1 : 3'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}] wire [15:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79] wire [15:0] _a_opcodes_set_T; // @[Monitor.scala:659:79] assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79] wire [15:0] _a_sizes_set_T; // @[Monitor.scala:660:77] assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77] wire [65538:0] _a_opcodes_set_T_1 = {65535'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}] assign a_opcodes_set = _T_598 ? 
_a_opcodes_set_T_1[16447:0] : 16448'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}] wire [65537:0] _a_sizes_set_T_1 = {65535'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}] assign a_sizes_set = _T_598 ? _a_sizes_set_T_1[16447:0] : 16448'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}] wire [4111:0] d_clr; // @[Monitor.scala:664:34] wire [4111:0] d_clr_wo_ready; // @[Monitor.scala:665:34] wire [16447:0] d_opcodes_clr; // @[Monitor.scala:668:33] wire [16447:0] d_sizes_clr; // @[Monitor.scala:670:31] wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46] wire d_release_ack; // @[Monitor.scala:673:46] assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46] wire d_release_ack_1; // @[Monitor.scala:783:46] assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46] wire _T_644 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26] wire [8191:0] _GEN_5 = 8192'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35] wire [8191:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35] wire [8191:0] _d_clr_T; // @[OneHot.scala:58:35] assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35] wire [8191:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35] wire [8191:0] _d_clr_T_1; // @[OneHot.scala:58:35] assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35] assign d_clr_wo_ready = _T_644 & ~d_release_ack ? _d_clr_wo_ready_T[4111:0] : 4112'h0; // @[OneHot.scala:58:35] wire _T_613 = _T_733 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35] assign d_clr = _T_613 ? _d_clr_T[4111:0] : 4112'h0; // @[OneHot.scala:58:35] wire [65550:0] _d_opcodes_clr_T_5 = 65551'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}] assign d_opcodes_clr = _T_613 ? _d_opcodes_clr_T_5[16447:0] : 16448'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}] wire [65550:0] _d_sizes_clr_T_5 = 65551'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}] assign d_sizes_clr = _T_613 ? 
_d_sizes_clr_T_5[16447:0] : 16448'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}] wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}] wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113] wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}] wire [4111:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27] wire [4111:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38] wire [4111:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}] wire [16447:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43] wire [16447:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62] wire [16447:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}] wire [16447:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39] wire [16447:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56] wire [16447:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}] reg [31:0] watchdog; // @[Monitor.scala:709:27] wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26] wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26] reg [4111:0] inflight_1; // @[Monitor.scala:726:35] wire [4111:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35] reg [16447:0] inflight_opcodes_1; // @[Monitor.scala:727:35] wire [16447:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43] reg [16447:0] inflight_sizes_1; // @[Monitor.scala:728:35] wire [16447:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41] wire d_first_done_2 = _d_first_T_2; // @[Decoupled.scala:51:35] wire [2:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[2:0]; // @[package.scala:243:{71,76}] wire [2:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}] reg d_first_counter_2; // @[Edges.scala:229:27] wire _d_first_last_T_4 = d_first_counter_2; // @[Edges.scala:229:27, :232:25] wire [1:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 2'h1; // @[Edges.scala:229:27, :230:28] wire d_first_counter1_2 = _d_first_counter1_T_2[0]; // @[Edges.scala:230:28] wire d_first_2 = ~d_first_counter_2; // @[Edges.scala:229:27, :231:25] wire _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27] wire _d_first_counter_T_2 = ~d_first_2 & d_first_counter1_2; // @[Edges.scala:230:28, :231:25, :236:21] wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35] wire [3:0] c_size_lookup; // @[Monitor.scala:748:35] wire [16447:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}] wire [16447:0] _c_opcode_lookup_T_6 = {16444'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}] wire [16447:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[16447:1]}; // @[Monitor.scala:749:{97,152}] assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}] wire [16447:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}] wire [16447:0] _c_size_lookup_T_6 = {16444'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}] wire [16447:0] 
_c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[16447:1]}; // @[Monitor.scala:750:{93,146}] assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}] wire [4111:0] d_clr_1; // @[Monitor.scala:774:34] wire [4111:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34] wire [16447:0] d_opcodes_clr_1; // @[Monitor.scala:776:34] wire [16447:0] d_sizes_clr_1; // @[Monitor.scala:777:34] wire _T_709 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26] assign d_clr_wo_ready_1 = _T_709 & d_release_ack_1 ? _d_clr_wo_ready_T_1[4111:0] : 4112'h0; // @[OneHot.scala:58:35] wire _T_691 = _T_733 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35] assign d_clr_1 = _T_691 ? _d_clr_T_1[4111:0] : 4112'h0; // @[OneHot.scala:58:35] wire [65550:0] _d_opcodes_clr_T_11 = 65551'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}] assign d_opcodes_clr_1 = _T_691 ? _d_opcodes_clr_T_11[16447:0] : 16448'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}] wire [65550:0] _d_sizes_clr_T_11 = 65551'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}] assign d_sizes_clr_1 = _T_691 ? _d_sizes_clr_T_11[16447:0] : 16448'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}] wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 13'h0; // @[Monitor.scala:36:7, :795:113] wire [4111:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46] wire [4111:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}] wire [16447:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62] wire [16447:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}] wire [16447:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58] wire [16447:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
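A pattern that recurs throughout the monitor logic above is the alignment decode: ~(7 << size), truncated to the beat-offset bits, yields the low address bits that must be zero for a size-aligned access (size = 2 gives 3'b011, size = 3 gives 3'b111). The following minimal Chisel sketch of that arithmetic is illustrative only — the module and port names are made up, and it is specialized to an 8-byte beat like the monitor above, not taken from the rocket-chip source:

import chisel3._

class AlignMaskSketch extends Module {
  val io = IO(new Bundle {
    val size    = Input(UInt(2.W))  // log2 of the access size in bytes
    val addr    = Input(UInt(26.W))
    val aligned = Output(Bool())
  })
  // Same computation as the generated is_aligned_mask / is_aligned wires above.
  val mask = (~(7.U(6.W) << io.size))(2, 0) // e.g. size = 2 -> 3'b011
  io.aligned := (io.addr(2, 0) & mask) === 0.U
}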
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
module AsyncResetSynchronizerShiftReg_w1_d3_i0_104( // @[SynchronizerReg.scala:80:7]
  input  clock, // @[SynchronizerReg.scala:80:7]
  input  reset, // @[SynchronizerReg.scala:80:7]
  input  io_d, // @[ShiftReg.scala:36:14]
  output io_q // @[ShiftReg.scala:36:14]
);

  wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7]
  wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
  wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41]
  wire output_0; // @[ShiftReg.scala:48:24]
  wire io_q_0; // @[SynchronizerReg.scala:80:7]
  assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7]
  AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_192 output_chain ( // @[ShiftReg.scala:45:23]
    .clock (clock),
    .reset (_output_T), // @[SynchronizerReg.scala:86:21]
    .io_d  (_output_T_1), // @[SynchronizerReg.scala:87:41]
    .io_q  (output_0)
  ); // @[ShiftReg.scala:45:23]
  assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule
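The module above is what the AsyncResetSynchronizerShiftReg companion object elaborates to for a 1-bit, 3-deep, reset-to-0 synchronizer. A usage sketch follows; the surrounding module and signal names are hypothetical, only the AsyncResetSynchronizerShiftReg apply call comes from the source above:

import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

class IrqSyncSketch extends Module {
  val io = IO(new Bundle {
    val async_irq = Input(Bool()) // launched from another clock domain
    val sync_irq  = Output(Bool())
  })
  // Elaborates to an AsyncResetSynchronizerShiftReg_w1_d3_i0 instance like the one above.
  io.sync_irq := AsyncResetSynchronizerShiftReg(io.async_irq, 3, 0, Some("irq_sync"))
}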
Generate the Verilog code corresponding to the following Chisel files. File RoundAnyRawFNToRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
=============================================================================*/ package hardfloat import chisel3._ import chisel3.util.Fill import consts._ //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class RoundAnyRawFNToRecFN( inExpWidth: Int, inSigWidth: Int, outExpWidth: Int, outSigWidth: Int, options: Int ) extends RawModule { override def desiredName = s"RoundAnyRawFNToRecFN_ie${inExpWidth}_is${inSigWidth}_oe${outExpWidth}_os${outSigWidth}" val io = IO(new Bundle { val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in' val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign' val in = Input(new RawFloat(inExpWidth, inSigWidth)) // (allowed exponent range has limits) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((outExpWidth + outSigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val sigMSBitAlwaysZero = ((options & flRoundOpt_sigMSBitAlwaysZero) != 0) val effectiveInSigWidth = if (sigMSBitAlwaysZero) inSigWidth else inSigWidth + 1 val neverUnderflows = ((options & (flRoundOpt_neverUnderflows | flRoundOpt_subnormsAlwaysExact) ) != 0) || (inExpWidth < outExpWidth) val neverOverflows = ((options & flRoundOpt_neverOverflows) != 0) || (inExpWidth < outExpWidth) val outNaNExp = BigInt(7)<<(outExpWidth - 2) val outInfExp = BigInt(6)<<(outExpWidth - 2) val outMaxFiniteExp = outInfExp - 1 val outMinNormExp = (BigInt(1)<<(outExpWidth - 1)) + 2 val outMinNonzeroExp = outMinNormExp - outSigWidth + 1 //------------------------------------------------------------------------ //------------------------------------------------------------------------ val roundingMode_near_even = (io.roundingMode === round_near_even) val roundingMode_minMag = (io.roundingMode === round_minMag) val roundingMode_min = (io.roundingMode === round_min) val roundingMode_max = (io.roundingMode === round_max) val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag) val roundingMode_odd = (io.roundingMode === round_odd) val roundMagUp = (roundingMode_min && io.in.sign) || (roundingMode_max && ! 
io.in.sign) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val sAdjustedExp = if (inExpWidth < outExpWidth) (io.in.sExp +& ((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S )(outExpWidth, 0).zext else if (inExpWidth == outExpWidth) io.in.sExp else io.in.sExp +& ((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S val adjustedSig = if (inSigWidth <= outSigWidth + 2) io.in.sig<<(outSigWidth - inSigWidth + 2) else (io.in.sig(inSigWidth, inSigWidth - outSigWidth - 1) ## io.in.sig(inSigWidth - outSigWidth - 2, 0).orR ) val doShiftSigDown1 = if (sigMSBitAlwaysZero) false.B else adjustedSig(outSigWidth + 2) val common_expOut = Wire(UInt((outExpWidth + 1).W)) val common_fractOut = Wire(UInt((outSigWidth - 1).W)) val common_overflow = Wire(Bool()) val common_totalUnderflow = Wire(Bool()) val common_underflow = Wire(Bool()) val common_inexact = Wire(Bool()) if ( neverOverflows && neverUnderflows && (effectiveInSigWidth <= outSigWidth) ) { //-------------------------------------------------------------------- //-------------------------------------------------------------------- common_expOut := sAdjustedExp(outExpWidth, 0) + doShiftSigDown1 common_fractOut := Mux(doShiftSigDown1, adjustedSig(outSigWidth + 1, 3), adjustedSig(outSigWidth, 2) ) common_overflow := false.B common_totalUnderflow := false.B common_underflow := false.B common_inexact := false.B } else { //-------------------------------------------------------------------- //-------------------------------------------------------------------- val roundMask = if (neverUnderflows) 0.U(outSigWidth.W) ## doShiftSigDown1 ## 3.U(2.W) else (lowMask( sAdjustedExp(outExpWidth, 0), outMinNormExp - outSigWidth - 1, outMinNormExp ) | doShiftSigDown1) ## 3.U(2.W) val shiftedRoundMask = 0.U(1.W) ## roundMask>>1 val roundPosMask = ~shiftedRoundMask & roundMask val roundPosBit = (adjustedSig & roundPosMask).orR val anyRoundExtra = (adjustedSig & shiftedRoundMask).orR val anyRound = roundPosBit || anyRoundExtra val roundIncr = ((roundingMode_near_even || roundingMode_near_maxMag) && roundPosBit) || (roundMagUp && anyRound) val roundedSig: Bits = Mux(roundIncr, (((adjustedSig | roundMask)>>2) +& 1.U) & ~Mux(roundingMode_near_even && roundPosBit && ! anyRoundExtra, roundMask>>1, 0.U((outSigWidth + 2).W) ), (adjustedSig & ~roundMask)>>2 | Mux(roundingMode_odd && anyRound, roundPosMask>>1, 0.U) ) //*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING //*** M.S. BIT OF SUBNORMAL SIG? 
val sRoundedExp = sAdjustedExp +& (roundedSig>>outSigWidth).asUInt.zext common_expOut := sRoundedExp(outExpWidth, 0) common_fractOut := Mux(doShiftSigDown1, roundedSig(outSigWidth - 1, 1), roundedSig(outSigWidth - 2, 0) ) common_overflow := (if (neverOverflows) false.B else //*** REWRITE BASED ON BEFORE-ROUNDING EXPONENT?: (sRoundedExp>>(outExpWidth - 1) >= 3.S)) common_totalUnderflow := (if (neverUnderflows) false.B else //*** WOULD BE GOOD ENOUGH TO USE EXPONENT BEFORE ROUNDING?: (sRoundedExp < outMinNonzeroExp.S)) val unboundedRange_roundPosBit = Mux(doShiftSigDown1, adjustedSig(2), adjustedSig(1)) val unboundedRange_anyRound = (doShiftSigDown1 && adjustedSig(2)) || adjustedSig(1, 0).orR val unboundedRange_roundIncr = ((roundingMode_near_even || roundingMode_near_maxMag) && unboundedRange_roundPosBit) || (roundMagUp && unboundedRange_anyRound) val roundCarry = Mux(doShiftSigDown1, roundedSig(outSigWidth + 1), roundedSig(outSigWidth) ) common_underflow := (if (neverUnderflows) false.B else common_totalUnderflow || //*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING //*** M.S. BIT OF SUBNORMAL SIG? (anyRound && ((sAdjustedExp>>outExpWidth) <= 0.S) && Mux(doShiftSigDown1, roundMask(3), roundMask(2)) && ! ((io.detectTininess === tininess_afterRounding) && ! Mux(doShiftSigDown1, roundMask(4), roundMask(3) ) && roundCarry && roundPosBit && unboundedRange_roundIncr))) common_inexact := common_totalUnderflow || anyRound } //------------------------------------------------------------------------ //------------------------------------------------------------------------ val isNaNOut = io.invalidExc || io.in.isNaN val notNaN_isSpecialInfOut = io.infiniteExc || io.in.isInf val commonCase = ! isNaNOut && ! notNaN_isSpecialInfOut && ! io.in.isZero val overflow = commonCase && common_overflow val underflow = commonCase && common_underflow val inexact = overflow || (commonCase && common_inexact) val overflow_roundMagUp = roundingMode_near_even || roundingMode_near_maxMag || roundMagUp val pegMinNonzeroMagOut = commonCase && common_totalUnderflow && (roundMagUp || roundingMode_odd) val pegMaxFiniteMagOut = overflow && ! 
overflow_roundMagUp val notNaN_isInfOut = notNaN_isSpecialInfOut || (overflow && overflow_roundMagUp) val signOut = Mux(isNaNOut, false.B, io.in.sign) val expOut = (common_expOut & ~Mux(io.in.isZero || common_totalUnderflow, (BigInt(7)<<(outExpWidth - 2)).U((outExpWidth + 1).W), 0.U ) & ~Mux(pegMinNonzeroMagOut, ~outMinNonzeroExp.U((outExpWidth + 1).W), 0.U ) & ~Mux(pegMaxFiniteMagOut, (BigInt(1)<<(outExpWidth - 1)).U((outExpWidth + 1).W), 0.U ) & ~Mux(notNaN_isInfOut, (BigInt(1)<<(outExpWidth - 2)).U((outExpWidth + 1).W), 0.U )) | Mux(pegMinNonzeroMagOut, outMinNonzeroExp.U((outExpWidth + 1).W), 0.U ) | Mux(pegMaxFiniteMagOut, outMaxFiniteExp.U((outExpWidth + 1).W), 0.U ) | Mux(notNaN_isInfOut, outInfExp.U((outExpWidth + 1).W), 0.U) | Mux(isNaNOut, outNaNExp.U((outExpWidth + 1).W), 0.U) val fractOut = Mux(isNaNOut || io.in.isZero || common_totalUnderflow, Mux(isNaNOut, (BigInt(1)<<(outSigWidth - 2)).U, 0.U), common_fractOut ) | Fill(outSigWidth - 1, pegMaxFiniteMagOut) io.out := signOut ## expOut ## fractOut io.exceptionFlags := io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class RoundRawFNToRecFN(expWidth: Int, sigWidth: Int, options: Int) extends RawModule { override def desiredName = s"RoundRawFNToRecFN_e${expWidth}_s${sigWidth}" val io = IO(new Bundle { val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in' val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign' val in = Input(new RawFloat(expWidth, sigWidth + 2)) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((expWidth + sigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) val roundAnyRawFNToRecFN = Module( new RoundAnyRawFNToRecFN( expWidth, sigWidth + 2, expWidth, sigWidth, options)) roundAnyRawFNToRecFN.io.invalidExc := io.invalidExc roundAnyRawFNToRecFN.io.infiniteExc := io.infiniteExc roundAnyRawFNToRecFN.io.in := io.in roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess io.out := roundAnyRawFNToRecFN.io.out io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags }
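The near-even rounding decision above (roundIncr plus the LSB-clearing term in roundedSig) reduces, for the common case where roundMask covers only the two low bits, to: increment when the round bit is set and either the sticky bit is set or the kept LSB is odd, so ties go to even. A plain-Scala model of just that decision — an illustrative sketch, not part of the hardfloat source:

def roundNearEven(sigTimes4: Int): Int = {
  val kept     = sigTimes4 >> 2       // bits that survive rounding
  val roundBit = (sigTimes4 >> 1) & 1 // MSB of the discarded bits (roundPosBit)
  val sticky   = sigTimes4 & 1        // OR of the remaining discarded bits (anyRoundExtra)
  val incr = roundBit == 1 && (sticky == 1 || (kept & 1) == 1)
  kept + (if (incr) 1 else 0)
}
// roundNearEven(0x9) == 2   (10.01 rounds down)
// roundNearEven(0xA) == 2   (10.10 is a tie and stays even)
// roundNearEven(0xB) == 3   (10.11 rounds up)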
module RoundRawFNToRecFN_e8_s24_70( // @[RoundAnyRawFNToRecFN.scala:295:5]
  input         io_invalidExc, // @[RoundAnyRawFNToRecFN.scala:299:16]
  input         io_in_isNaN, // @[RoundAnyRawFNToRecFN.scala:299:16]
  input         io_in_isInf, // @[RoundAnyRawFNToRecFN.scala:299:16]
  input         io_in_isZero, // @[RoundAnyRawFNToRecFN.scala:299:16]
  input         io_in_sign, // @[RoundAnyRawFNToRecFN.scala:299:16]
  input  [9:0]  io_in_sExp, // @[RoundAnyRawFNToRecFN.scala:299:16]
  input  [26:0] io_in_sig, // @[RoundAnyRawFNToRecFN.scala:299:16]
  output [32:0] io_out, // @[RoundAnyRawFNToRecFN.scala:299:16]
  output [4:0]  io_exceptionFlags // @[RoundAnyRawFNToRecFN.scala:299:16]
);

  wire io_invalidExc_0 = io_invalidExc; // @[RoundAnyRawFNToRecFN.scala:295:5]
  wire io_in_isNaN_0 = io_in_isNaN; // @[RoundAnyRawFNToRecFN.scala:295:5]
  wire io_in_isInf_0 = io_in_isInf; // @[RoundAnyRawFNToRecFN.scala:295:5]
  wire io_in_isZero_0 = io_in_isZero; // @[RoundAnyRawFNToRecFN.scala:295:5]
  wire io_in_sign_0 = io_in_sign; // @[RoundAnyRawFNToRecFN.scala:295:5]
  wire [9:0] io_in_sExp_0 = io_in_sExp; // @[RoundAnyRawFNToRecFN.scala:295:5]
  wire [26:0] io_in_sig_0 = io_in_sig; // @[RoundAnyRawFNToRecFN.scala:295:5]
  wire io_detectTininess = 1'h1; // @[RoundAnyRawFNToRecFN.scala:295:5, :299:16, :310:15]
  wire [2:0] io_roundingMode = 3'h0; // @[RoundAnyRawFNToRecFN.scala:295:5, :299:16, :310:15]
  wire io_infiniteExc = 1'h0; // @[RoundAnyRawFNToRecFN.scala:295:5, :299:16, :310:15]
  wire [32:0] io_out_0; // @[RoundAnyRawFNToRecFN.scala:295:5]
  wire [4:0] io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:295:5]
  RoundAnyRawFNToRecFN_ie8_is26_oe8_os24_70 roundAnyRawFNToRecFN ( // @[RoundAnyRawFNToRecFN.scala:310:15]
    .io_invalidExc     (io_invalidExc_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
    .io_in_isNaN       (io_in_isNaN_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
    .io_in_isInf       (io_in_isInf_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
    .io_in_isZero      (io_in_isZero_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
    .io_in_sign        (io_in_sign_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
    .io_in_sExp        (io_in_sExp_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
    .io_in_sig         (io_in_sig_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
    .io_out            (io_out_0),
    .io_exceptionFlags (io_exceptionFlags_0)
  ); // @[RoundAnyRawFNToRecFN.scala:310:15]
  assign io_out = io_out_0; // @[RoundAnyRawFNToRecFN.scala:295:5]
  assign io_exceptionFlags = io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:295:5]
endmodule
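An instantiation sketch for the wrapper above: it rounds a raw single-precision result to the 33-bit recoded format using round-to-nearest-even and after-rounding tininess detection, matching the constants hard-wired into the generated module (io_roundingMode = 3'h0, io_detectTininess = 1'h1). Only the RoundRawFNToRecFN ports come from the source above; the surrounding module, its port names, and the use of consts.round_near_even / consts.tininess_afterRounding from the hardfloat consts object are assumptions for illustration:

import chisel3._
import hardfloat._

class RoundFP32Sketch extends Module {
  val io = IO(new Bundle {
    val in      = Input(new RawFloat(8, 26)) // expWidth = 8, sigWidth + 2 = 26
    val invalid = Input(Bool())
    val out     = Output(UInt(33.W))         // recoded FP32
    val flags   = Output(UInt(5.W))
  })
  val rounder = Module(new RoundRawFNToRecFN(expWidth = 8, sigWidth = 24, options = 0))
  rounder.io.invalidExc     := io.invalid
  rounder.io.infiniteExc    := false.B
  rounder.io.in             := io.in
  rounder.io.roundingMode   := consts.round_near_even        // 3'h0, as in the module above
  rounder.io.detectTininess := consts.tininess_afterRounding // 1'h1, as in the module above
  io.out   := rounder.io.out
  io.flags := rounder.io.exceptionFlags
}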
Generate the Verilog code corresponding to the following Chisel files. File LoadSegmenter.scala: package saturn.mem import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import freechips.rocketchip.rocket._ import freechips.rocketchip.util._ import freechips.rocketchip.tile._ import saturn.common._ class LoadSegmenter(implicit p: Parameters) extends CoreModule()(p) with HasVectorParams { val io = IO(new Bundle { val valid = Input(Bool()) val done = Output(Bool()) val op = Input(new VectorMemMacroOp) val compactor = Decoupled(new CompactorReq(dLenB)) val compactor_data = Input(UInt(dLen.W)) val resp = Decoupled(new Bundle { val data = UInt(dLen.W) val debug_id = UInt(debugIdSz.W) }) }) val segbuf = Module(new LoadSegmentBuffer(vParams.doubleBufferSegments)) val r_eidx = Reg(UInt(log2Ceil(maxVLMax).W)) val r_head = RegInit(true.B) val r_sidx = Reg(UInt(3.W)) val eidx = Mux(r_head, io.op.vstart, r_eidx) val sidx = Mux(r_head, io.op.segstart, r_sidx) val mem_size = io.op.elem_size val incr = (dLenB.U - (Mux(io.op.seg_nf === 0.U, eidx, sidx) << mem_size)(dLenOffBits-1,0)) >> mem_size val eidx_incr = Mux(io.op.seg_nf =/= 0.U, 1.U, incr) val sidx_incr = incr val next_eidx = eidx +& eidx_incr val next_sidx = sidx +& sidx_incr val sidx_tail = next_sidx > io.op.seg_nf val eidx_tail = next_eidx >= io.op.vl when (io.op.seg_nf === 0.U) { io.compactor.valid := io.valid && !segbuf.io.busy && io.resp.ready io.compactor.bits.head := eidx << mem_size io.compactor.bits.tail := Mux(eidx_tail, io.op.vl << mem_size, 0.U) } .otherwise { io.compactor.valid := io.valid && segbuf.io.in.ready io.compactor.bits.head := sidx << mem_size io.compactor.bits.tail := Mux(sidx_tail, (io.op.nf +& 1.U) << mem_size, 0.U) } segbuf.io.in.valid := io.valid && io.op.seg_nf =/= 0.U && io.compactor.ready segbuf.io.in.bits.eew := mem_size segbuf.io.in.bits.nf := io.op.nf segbuf.io.in.bits.data := io.compactor_data segbuf.io.in.bits.eidx := eidx segbuf.io.in.bits.sidx := sidx segbuf.io.in.bits.sidx_tail := sidx_tail segbuf.io.in.bits.tail := eidx_tail segbuf.io.in.bits.segstart := io.op.segstart segbuf.io.in.bits.debug_id := io.op.debug_id segbuf.io.out.ready := io.resp.ready io.resp.valid := Mux(segbuf.io.busy, segbuf.io.out.valid, io.compactor.ready && io.valid && io.op.seg_nf === 0.U) io.resp.bits.data := Mux(segbuf.io.busy, segbuf.io.out.bits.data, io.compactor_data) io.resp.bits.debug_id := Mux(segbuf.io.busy, segbuf.io.out.bits.debug_id, io.op.debug_id) val seg_ready = Mux(io.op.seg_nf === 0.U, !segbuf.io.busy && io.compactor.ready && io.resp.ready, segbuf.io.in.ready && io.compactor.ready && sidx_tail) when (segbuf.io.in.fire) { r_head := false.B when (r_head) { r_eidx := io.op.vstart } r_sidx := next_sidx when (next_sidx > io.op.nf) { r_sidx := 0.U } } io.done := false.B when (seg_ready && io.valid) { r_head := eidx_tail r_eidx := next_eidx io.done := eidx_tail } } File Bundles.scala: package saturn.common import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import freechips.rocketchip.rocket._ import freechips.rocketchip.util._ import freechips.rocketchip.tile._ class VectorMemMacroOp(implicit p: Parameters) extends CoreBundle()(p) with HasVectorParams { val debug_id = UInt(debugIdSz.W) val base_offset = UInt(pgIdxBits.W) val page = UInt((paddrBits - pgIdxBits).W) val stride = UInt(pgIdxBits.W) val segstart = UInt(3.W) val segend = UInt(3.W) val vstart = UInt(log2Ceil(maxVLMax).W) val vl = UInt((1+log2Ceil(maxVLMax)).W) val mop = UInt(2.W) val vm = Bool() val nf = UInt(3.W) val 
idx_size = UInt(2.W) val elem_size = UInt(2.W) val whole_reg = Bool() val store = Bool() val fast_sg = Bool() def indexed = !mop.isOneOf(mopUnit, mopStrided) def seg_nf = Mux(whole_reg, 0.U, nf) def wr_nf = Mux(whole_reg, nf, 0.U) } class VectorIssueInst(implicit p: Parameters) extends CoreBundle()(p) with HasVectorParams { val pc = UInt(vaddrBitsExtended.W) val bits = UInt(32.W) val vconfig = new VConfig val vstart = UInt(log2Ceil(maxVLMax).W) val segstart = UInt(3.W) val segend = UInt(3.W) val rs1_data = UInt(xLen.W) val rs2_data = UInt(xLen.W) val page = UInt((paddrBits - pgIdxBits).W) val vat = UInt(vParams.vatSz.W) val rm = UInt(3.W) val emul = UInt(2.W) val fast_sg = Bool() val debug_id = UInt(debugIdSz.W) val mop = UInt(2.W) // stored separately from bits since dispatch may need to set this def opcode = bits(6,0) def store = opcode(5) def mem_idx_size = bits(13,12) def mem_elem_size = Mux(mop(0), vconfig.vtype.vsew, bits(13,12)) def vm = bits(25) def orig_mop = bits(27,26) def umop = bits(24,20) def nf = bits(31,29) def wr = orig_mop === mopUnit && umop === lumopWhole def seg_nf = Mux(wr, 0.U, nf) def wr_nf = Mux(wr, nf, 0.U) def vmu = opcode.isOneOf(opcLoad, opcStore) def rs1 = bits(19,15) def rs2 = bits(24,20) def rd = bits(11,7) def may_write_v0 = rd === 0.U && opcode =/= opcStore def funct3 = bits(14,12) def imm5 = bits(19,15) def imm5_sext = Cat(Fill(59, imm5(4)), imm5) def funct6 = bits(31,26) def writes_xrf = !vmu && ((funct3 === OPMVV && opmf6 === OPMFunct6.wrxunary0) || (funct3 === OPFVV && opff6 === OPFFunct6.wrfunary0)) def writes_frf = !vmu && (funct3 === OPFVV) def isOpi = funct3.isOneOf(OPIVV, OPIVI, OPIVX) def isOpm = funct3.isOneOf(OPMVV, OPMVX) def isOpf = funct3.isOneOf(OPFVV, OPFVF) def opmf6 = Mux(isOpm, OPMFunct6(funct6), OPMFunct6.illegal) def opif6 = Mux(isOpi, OPIFunct6(funct6), OPIFunct6.illegal) def opff6 = Mux(isOpf, OPFFunct6(funct6), OPFFunct6.illegal) } class BackendIssueInst(implicit p: Parameters) extends VectorIssueInst()(p) { val reduction = Bool() // accumulates into vd[0] val scalar_to_vd0 = Bool() // mv scalar to vd[0] val wide_vd = Bool() // vd reads/writes at 2xSEW val wide_vs2 = Bool() // vs2 reads at 2xSEW val writes_mask = Bool() // writes dest as a mask val reads_vs1_mask = Bool() // vs1 read as mask val reads_vs2_mask = Bool() // vs2 read as mask val rs1_is_rs2 = Bool() val nf_log2 = UInt(2.W) val renv1 = Bool() val renv2 = Bool() val renvd = Bool() val renvm = Bool() val wvd = Bool() } class IssueQueueInst(nSeqs: Int)(implicit p: Parameters) extends BackendIssueInst()(p) { val seq = UInt(nSeqs.W) } class VectorWrite(writeBits: Int)(implicit p: Parameters) extends CoreBundle()(p) with HasVectorParams { val eg = UInt(log2Ceil(32 * vLen / writeBits).W) def bankId = if (vrfBankBits == 0) 0.U else eg(vrfBankBits-1,0) val data = UInt(writeBits.W) val mask = UInt(writeBits.W) } class ScalarWrite extends Bundle { val data = UInt(64.W) val fp = Bool() val size = UInt(2.W) val rd = UInt(5.W) } class VectorReadReq(implicit p: Parameters) extends CoreBundle()(p) with HasVectorParams { val eg = UInt(log2Ceil(egsTotal).W) val oldest = Bool() } class VectorReadIO(implicit p: Parameters) extends CoreBundle()(p) with HasVectorParams { val req = Decoupled(new VectorReadReq) val resp = Input(UInt(dLen.W)) } class VectorIndexAccessIO(implicit p: Parameters) extends CoreBundle()(p) with HasVectorParams { val ready = Output(Bool()) val valid = Input(Bool()) val vrs = Input(UInt(5.W)) val eidx = Input(UInt((1+log2Ceil(maxVLMax)).W)) val eew = Input(UInt(2.W)) 
val idx = Output(UInt(64.W)) } class VectorMaskAccessIO(implicit p: Parameters) extends CoreBundle()(p) with HasVectorParams { val ready = Output(Bool()) val valid = Input(Bool()) val eidx = Input(UInt((1+log2Ceil(maxVLMax)).W)) val mask = Output(Bool()) } class MaskedByte(implicit p: Parameters) extends CoreBundle()(p) with HasVectorParams { val debug_id = UInt(debugIdSz.W) val data = UInt(8.W) val mask = Bool() } class ExecuteMicroOp(implicit p: Parameters) extends CoreBundle()(p) with HasVectorParams { val eidx = UInt(log2Ceil(maxVLMax).W) val vl = UInt((1+log2Ceil(maxVLMax)).W) val rvs1_data = UInt(dLen.W) val rvs2_data = UInt(dLen.W) val rvd_data = UInt(dLen.W) val rvm_data = UInt(dLen.W) val rvs1_elem = UInt(64.W) val rvs2_elem = UInt(64.W) val rvd_elem = UInt(64.W) val rvs1_eew = UInt(2.W) val rvs2_eew = UInt(2.W) val rvd_eew = UInt(2.W) val vd_eew = UInt(2.W) val rmask = UInt(dLenB.W) val wmask = UInt(dLenB.W) val full_tail_mask = UInt(dLen.W) val wvd_eg = UInt(log2Ceil(egsTotal).W) val funct3 = UInt(3.W) def isOpi = funct3.isOneOf(OPIVV, OPIVI, OPIVX) def isOpm = funct3.isOneOf(OPMVV, OPMVX) def isOpf = funct3.isOneOf(OPFVV, OPFVF) def opmf6 = Mux(isOpm, OPMFunct6(funct6), OPMFunct6.illegal) def opif6 = Mux(isOpi, OPIFunct6(funct6), OPIFunct6.illegal) def opff6 = Mux(isOpf, OPFFunct6(funct6), OPFFunct6.illegal) def vd_eew8 = vd_eew === 0.U def vd_eew16 = vd_eew === 1.U def vd_eew32 = vd_eew === 2.U def vd_eew64 = vd_eew === 3.U val funct6 = UInt(6.W) val rs1 = UInt(5.W) val rs2 = UInt(5.W) val rd = UInt(5.W) val vm = Bool() val head = Bool() val tail = Bool() val vat = UInt(vParams.vatSz.W) val acc = Bool() val rm = UInt(3.W) def vxrm = rm(1,0) def frm = rm } class StoreDataMicroOp(implicit p: Parameters) extends CoreBundle()(p) with HasVectorParams { val stdata = UInt(dLen.W) val stmask = UInt(dLenB.W) val debug_id = UInt(debugIdSz.W) val tail = Bool() val vat = UInt(vParams.vatSz.W) def asMaskedBytes = { val bytes = Wire(Vec(dLenB, new MaskedByte)) for (i <- 0 until dLenB) { bytes(i).data := stdata(((i+1)*8)-1,i*8) bytes(i).mask := stmask(i) bytes(i).debug_id := debug_id } bytes } } class LoadRespMicroOp(implicit p: Parameters) extends CoreBundle()(p) with HasVectorParams { val wvd_eg = UInt(log2Ceil(egsTotal).W) val wmask = UInt(dLenB.W) val tail = Bool() val debug_id = UInt(debugIdSz.W) val vat = UInt(vParams.vatSz.W) } class PermuteMicroOp(implicit p: Parameters) extends CoreBundle()(p) with HasVectorParams { val renv2 = Bool() val renvm = Bool() val rvs2_data = UInt(dLen.W) val eidx = UInt(log2Ceil(maxVLMax).W) val rvs2_eew = UInt(2.W) val rvm_data = UInt(dLen.W) val vmu = Bool() val vl = UInt((1+log2Ceil(maxVLMax)).W) val tail = Bool() } class PipeHazard(pipe_depth: Int)(implicit p: Parameters) extends CoreBundle()(p) with HasVectorParams { val latency = UInt(log2Ceil(pipe_depth).W) val eg = UInt(log2Ceil(egsTotal).W) def eg_oh = UIntToOH(eg) } class SequencerHazard(implicit p: Parameters) extends CoreBundle()(p) with HasVectorParams { val vat = UInt(vParams.vatSz.W) val rintent = UInt(egsTotal.W) val wintent = UInt(egsTotal.W) } class InstructionHazard(implicit p: Parameters) extends CoreBundle()(p) with HasVectorParams { val vat = UInt(vParams.vatSz.W) val rintent = UInt(32.W) val wintent = UInt(32.W) }
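The incr term in LoadSegmenter.scala measures how many elements of the current element size still fit in the remainder of the dLenB-byte beat. A small software model of that arithmetic, assuming dLenB = 16 and dLenOffBits = 4 to match the 128-bit datapath of the generated module below:

// Software sketch of LoadSegmenter's `incr` (assumes dLenB = 16, dLenOffBits = 4).
// memSize is log2 of the element size in bytes; idx is eidx (non-segmented)
// or sidx (segmented), exactly as selected by the Mux in the Chisel above.
def beatIncr(idx: Int, memSize: Int, dLenB: Int = 16): Int =
  (dLenB - ((idx << memSize) & (dLenB - 1))) >> memSize

// Example: beatIncr(5, 2) == 3, so with 4-byte elements starting at element 5,
// three more elements fill out the 16-byte beat and next_eidx = 5 + 3 = 8,
// which is aligned to a beat boundary for the next transfer.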
module LoadSegmenter( // @[LoadSegmenter.scala:11:7]
  input          clock, // @[LoadSegmenter.scala:11:7]
  input          reset, // @[LoadSegmenter.scala:11:7]
  input          io_valid, // @[LoadSegmenter.scala:12:14]
  output         io_done, // @[LoadSegmenter.scala:12:14]
  input  [15:0]  io_op_debug_id, // @[LoadSegmenter.scala:12:14]
  input  [2:0]   io_op_segstart, // @[LoadSegmenter.scala:12:14]
  input  [6:0]   io_op_vstart, // @[LoadSegmenter.scala:12:14]
  input  [7:0]   io_op_vl, // @[LoadSegmenter.scala:12:14]
  input  [2:0]   io_op_nf, // @[LoadSegmenter.scala:12:14]
  input  [1:0]   io_op_elem_size, // @[LoadSegmenter.scala:12:14]
  input          io_op_whole_reg, // @[LoadSegmenter.scala:12:14]
  input          io_compactor_ready, // @[LoadSegmenter.scala:12:14]
  output         io_compactor_valid, // @[LoadSegmenter.scala:12:14]
  output [3:0]   io_compactor_bits_head, // @[LoadSegmenter.scala:12:14]
  output [3:0]   io_compactor_bits_tail, // @[LoadSegmenter.scala:12:14]
  input  [127:0] io_compactor_data, // @[LoadSegmenter.scala:12:14]
  input          io_resp_ready, // @[LoadSegmenter.scala:12:14]
  output         io_resp_valid, // @[LoadSegmenter.scala:12:14]
  output [127:0] io_resp_bits_data, // @[LoadSegmenter.scala:12:14]
  output [15:0]  io_resp_bits_debug_id // @[LoadSegmenter.scala:12:14]
);

  wire         _segbuf_io_in_ready; // @[LoadSegmenter.scala:26:22]
  wire         _segbuf_io_out_valid; // @[LoadSegmenter.scala:26:22]
  wire [127:0] _segbuf_io_out_bits_data; // @[LoadSegmenter.scala:26:22]
  wire [15:0]  _segbuf_io_out_bits_debug_id; // @[LoadSegmenter.scala:26:22]
  wire         _segbuf_io_busy; // @[LoadSegmenter.scala:26:22]
  reg  [6:0]   r_eidx; // @[LoadSegmenter.scala:28:19]
  reg          r_head; // @[LoadSegmenter.scala:29:23]
  reg  [2:0]   r_sidx; // @[LoadSegmenter.scala:30:19]
  wire [6:0]   eidx = r_head ? io_op_vstart : r_eidx; // @[LoadSegmenter.scala:28:19, :29:23, :31:17]
  wire [2:0]   sidx = r_head ? io_op_segstart : r_sidx; // @[LoadSegmenter.scala:29:23, :30:19, :32:17]
  wire [2:0]   _seg_ready_T = io_op_whole_reg ? 3'h0 : io_op_nf; // @[Bundles.scala:33:19]
  wire [9:0]   _GEN = {8'h0, io_op_elem_size}; // @[LoadSegmenter.scala:35:64]
  wire [9:0]   _incr_T_3 = {3'h0, (|_seg_ready_T) ? {4'h0, sidx} : eidx} << _GEN; // @[LoadSegmenter.scala:31:17, :32:17, :35:{29,43,64}]
  wire [4:0]   incr = 5'h10 - {1'h0, _incr_T_3[3:0]} >> io_op_elem_size; // @[LoadSegmenter.scala:11:7, :35:{23,64,76,95}]
  wire [7:0]   next_eidx = {1'h0, eidx} + {3'h0, (|_seg_ready_T) ? 5'h1 : incr}; // @[LoadSegmenter.scala:11:7, :31:17, :35:95, :36:{22,36}, :38:24]
  wire [5:0]   _GEN_0 = {3'h0, sidx}; // @[LoadSegmenter.scala:32:17, :39:24]
  wire [5:0]   next_sidx = _GEN_0 + {1'h0, incr}; // @[LoadSegmenter.scala:11:7, :35:95, :39:24]
  wire         sidx_tail = next_sidx > {3'h0, _seg_ready_T}; // @[LoadSegmenter.scala:39:24, :41:29]
  wire         eidx_tail = next_eidx >= io_op_vl; // @[LoadSegmenter.scala:38:24, :42:29]
  wire [9:0]   _io_compactor_bits_head_T = {3'h0, eidx} << _GEN; // @[LoadSegmenter.scala:31:17, :35:64, :46:36]
  wire [10:0]  _io_compactor_bits_tail_T = {3'h0, io_op_vl} << io_op_elem_size; // @[LoadSegmenter.scala:47:55]
  wire [5:0]   _io_compactor_bits_head_T_1 = _GEN_0 << io_op_elem_size; // @[LoadSegmenter.scala:39:24, :50:36]
  wire [6:0]   _io_compactor_bits_tail_T_3 = {3'h0, {1'h0, io_op_nf} + 4'h1} << io_op_elem_size; // @[LoadSegmenter.scala:11:7, :51:{56,64}]
  wire         segbuf_io_in_valid = io_valid & (|_seg_ready_T) & io_compactor_ready; // @[LoadSegmenter.scala:54:{34,50,58}]
  wire         _GEN_1 = ((|_seg_ready_T) ? _segbuf_io_in_ready & io_compactor_ready & sidx_tail : ~_segbuf_io_busy & io_compactor_ready & io_resp_ready) & io_valid; // @[LoadSegmenter.scala:26:22, :35:43, :41:29, :74:22, :75:{5,21,43}, :76:{24,46}, :88:19]
  wire         _GEN_2 = _segbuf_io_in_ready & segbuf_io_in_valid; // @[Decoupled.scala:51:35]
  always @(posedge clock) begin // @[LoadSegmenter.scala:11:7]
    if (_GEN_1) // @[LoadSegmenter.scala:88:19]
      r_eidx <= next_eidx[6:0]; // @[LoadSegmenter.scala:28:19, :38:24, :90:12]
    else if (_GEN_2 & r_head) // @[Decoupled.scala:51:35]
      r_eidx <= io_op_vstart; // @[LoadSegmenter.scala:28:19]
    if (_GEN_2) // @[Decoupled.scala:51:35]
      r_sidx <= next_sidx > {3'h0, io_op_nf} ? 3'h0 : next_sidx[2:0]; // @[LoadSegmenter.scala:30:19, :39:24, :82:12, :83:{21,33}, :84:14]
    if (reset) // @[LoadSegmenter.scala:11:7]
      r_head <= 1'h1; // @[LoadSegmenter.scala:11:7, :29:23]
    else // @[LoadSegmenter.scala:11:7]
      r_head <= _GEN_1 ? eidx_tail : ~_GEN_2 & r_head; // @[Decoupled.scala:51:35]
  end // always @(posedge)
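To relate the netlist above back to LoadSegmenter.scala, the anonymous nets can be read as follows (my annotation of the generated names, not comments emitted by the tool):

// _GEN   : io_op_elem_size zero-extended to serve as the mem_size shift amount
// _GEN_0 : sidx zero-extended for the next_sidx addition
// _GEN_1 : seg_ready && io.valid  -- advances r_eidx and reloads r_head with eidx_tail
// _GEN_2 : segbuf.io.in.fire      -- captures r_eidx on the first segment beat and advances r_sidx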
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File AsyncQueue.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ case class AsyncQueueParams( depth: Int = 8, sync: Int = 3, safe: Boolean = true, // If safe is true, then effort is made to resynchronize the crossing indices when either side is reset. // This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty. narrow: Boolean = false) // If narrow is true then the read mux is moved to the source side of the crossing. // This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing, // at the expense of a combinational path from the sink to the source and back to the sink. { require (depth > 0 && isPow2(depth)) require (sync >= 2) val bits = log2Ceil(depth) val wires = if (narrow) 1 else depth } object AsyncQueueParams { // When there is only one entry, we don't need narrow. 
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false) } class AsyncBundleSafety extends Bundle { val ridx_valid = Input (Bool()) val widx_valid = Output(Bool()) val source_reset_n = Output(Bool()) val sink_reset_n = Input (Bool()) } class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle { // Data-path synchronization val mem = Output(Vec(params.wires, gen)) val ridx = Input (UInt((params.bits+1).W)) val widx = Output(UInt((params.bits+1).W)) val index = params.narrow.option(Input(UInt(params.bits.W))) // Signals used to self-stabilize a safe AsyncQueue val safe = params.safe.option(new AsyncBundleSafety) } object GrayCounter { def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = { val incremented = Wire(UInt(bits.W)) val binary = RegNext(next=incremented, init=0.U).suggestName(name) incremented := Mux(clear, 0.U, binary + increment.asUInt) incremented ^ (incremented >> 1) } } class AsyncValidSync(sync: Int, desc: String) extends RawModule { val io = IO(new Bundle { val in = Input(Bool()) val out = Output(Bool()) }) val clock = IO(Input(Clock())) val reset = IO(Input(AsyncReset())) withClockAndReset(clock, reset){ io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc)) } } class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSource_${gen.typeName}" val io = IO(new Bundle { // These come from the source domain val enq = Flipped(Decoupled(gen)) // These cross to the sink clock domain val async = new AsyncBundle(gen, params) }) val bits = params.bits val sink_ready = WireInit(true.B) val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all. 
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin")) val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray")) val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U) val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1)) when (io.enq.fire) { mem(index) := io.enq.bits } val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg")) io.enq.ready := ready_reg && sink_ready val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray")) io.async.widx := widx_reg io.async.index match { case Some(index) => io.async.mem(0) := mem(index) case None => io.async.mem := mem } io.async.safe.foreach { sio => val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0")) val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1")) val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend")) val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid")) source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_valid .reset := reset.asAsyncReset source_valid_0.clock := clock source_valid_1.clock := clock sink_extend .clock := clock sink_valid .clock := clock source_valid_0.io.in := true.B source_valid_1.io.in := source_valid_0.io.out sio.widx_valid := source_valid_1.io.out sink_extend.io.in := sio.ridx_valid sink_valid.io.in := sink_extend.io.out sink_ready := sink_valid.io.out sio.source_reset_n := !reset.asBool // Assert that if there is stuff in the queue, then reset cannot happen // Impossible to write because dequeue can occur on the receiving side, // then reset allowed to happen, but write side cannot know that dequeue // occurred. 
// TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected") // assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty") } } class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSink_${gen.typeName}" val io = IO(new Bundle { // These come from the sink domain val deq = Decoupled(gen) // These cross to the source clock domain val async = Flipped(new AsyncBundle(gen, params)) }) val bits = params.bits val source_ready = WireInit(true.B) val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin")) val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray")) val valid = source_ready && ridx =/= widx // The mux is safe because timing analysis ensures ridx has reached the register // On an ASIC, changes to the unread location cannot affect the selected value // On an FPGA, only one input changes at a time => mem updates don't cause glitches // The register only latches when the selected valued is not being written val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1)) io.async.index.foreach { _ := index } // This register does not NEED to be reset, as its contents will not // be considered unless the asynchronously reset deq valid register is set. // It is possible that bits latches when the source domain is reset / has power cut // This is safe, because isolation gates brought mem low before the zeroed widx reached us val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index) io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg")) val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg")) io.deq.valid := valid_reg && source_ready val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray")) io.async.ridx := ridx_reg io.async.safe.foreach { sio => val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0")) val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1")) val source_extend = Module(new AsyncValidSync(params.sync, "source_extend")) val source_valid = Module(new AsyncValidSync(params.sync, "source_valid")) sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_valid .reset := reset.asAsyncReset sink_valid_0 .clock := clock sink_valid_1 .clock := clock source_extend.clock := clock source_valid .clock := clock sink_valid_0.io.in := true.B sink_valid_1.io.in := sink_valid_0.io.out sio.ridx_valid := sink_valid_1.io.out source_extend.io.in := sio.widx_valid source_valid.io.in := source_extend.io.out source_ready := source_valid.io.out sio.sink_reset_n := !reset.asBool // TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // // val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool // val reset_and_extend_prev = RegNext(reset_and_extend, true.B) // val reset_rise = !reset_and_extend_prev && reset_and_extend // val prev_idx_match = 
AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0) // assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty") } } object FromAsyncBundle { // Sometimes it makes sense for the sink to have different sync than the source def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync) def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = { val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync))) sink.io.async <> x sink.io.deq } } object ToAsyncBundle { def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = { val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params)) source.io.enq <> x source.io.async } } class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] { val io = IO(new CrossingIO(gen)) val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) } val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) } source.io.enq <> io.enq io.deq <> sink.io.deq sink.io.async <> source.io.async }
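GrayCounter above forms the crossing indices as incremented ^ (incremented >> 1), so widx and ridx change by exactly one bit per increment as they pass through the synchronizers. A tiny software model of that conversion (plain Scala, not part of the hardware source):

// Binary-to-Gray conversion used by GrayCounter: gray = bin ^ (bin >> 1).
def toGray(bin: Int): Int = bin ^ (bin >> 1)

// Successive counts map to codes differing in a single bit:
// (0 to 4).map(toGray) == Seq(0, 1, 3, 2, 6)
// i.e. 000 -> 001 -> 011 -> 010 -> 110, so a synchronizer that samples during a
// transition can only observe the old or the new index, never a torn value.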
module AsyncValidSync_144( // @[AsyncQueue.scala:58:7]
  output io_out, // @[AsyncQueue.scala:59:14]
  input  clock, // @[AsyncQueue.scala:63:17]
  input  reset // @[AsyncQueue.scala:64:17]
);

  wire io_in = 1'h1; // @[ShiftReg.scala:45:23]
  wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
  wire io_out_0; // @[AsyncQueue.scala:58:7]
  assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
  AsyncResetSynchronizerShiftReg_w1_d3_i0_159 io_out_source_valid_0 ( // @[ShiftReg.scala:45:23]
    .clock (clock),
    .reset (reset),
    .io_q  (_io_out_WIRE)
  ); // @[ShiftReg.scala:45:23]
  assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule
Generate the Verilog code corresponding to the following Chisel files. File Crossing.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.interrupts import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.util.{SynchronizerShiftReg, AsyncResetReg} @deprecated("IntXing does not ensure interrupt source is glitch free. Use IntSyncSource and IntSyncSink", "rocket-chip 1.2") class IntXing(sync: Int = 3)(implicit p: Parameters) extends LazyModule { val intnode = IntAdapterNode() lazy val module = new Impl class Impl extends LazyModuleImp(this) { (intnode.in zip intnode.out) foreach { case ((in, _), (out, _)) => out := SynchronizerShiftReg(in, sync) } } } object IntSyncCrossingSource { def apply(alreadyRegistered: Boolean = false)(implicit p: Parameters) = { val intsource = LazyModule(new IntSyncCrossingSource(alreadyRegistered)) intsource.node } } class IntSyncCrossingSource(alreadyRegistered: Boolean = false)(implicit p: Parameters) extends LazyModule { val node = IntSyncSourceNode(alreadyRegistered) lazy val module = if (alreadyRegistered) (new ImplRegistered) else (new Impl) class Impl extends LazyModuleImp(this) { def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0) override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out.sync := AsyncResetReg(Cat(in.reverse)).asBools } } class ImplRegistered extends LazyRawModuleImp(this) { def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0) override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}_Registered" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out.sync := in } } } object IntSyncCrossingSink { @deprecated("IntSyncCrossingSink which used the `sync` parameter to determine crossing type is deprecated. 
Use IntSyncAsyncCrossingSink, IntSyncRationalCrossingSink, or IntSyncSyncCrossingSink instead for > 1, 1, and 0 sync values respectively", "rocket-chip 1.2") def apply(sync: Int = 3)(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync)) intsink.node } } class IntSyncAsyncCrossingSink(sync: Int = 3)(implicit p: Parameters) extends LazyModule { val node = IntSyncSinkNode(sync) lazy val module = new Impl class Impl extends LazyModuleImp(this) { override def desiredName = s"IntSyncAsyncCrossingSink_n${node.out.size}x${node.out.head._1.size}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out := SynchronizerShiftReg(in.sync, sync) } } } object IntSyncAsyncCrossingSink { def apply(sync: Int = 3)(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync)) intsink.node } } class IntSyncSyncCrossingSink()(implicit p: Parameters) extends LazyModule { val node = IntSyncSinkNode(0) lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { def outSize = node.out.headOption.map(_._1.size).getOrElse(0) override def desiredName = s"IntSyncSyncCrossingSink_n${node.out.size}x${outSize}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out := in.sync } } } object IntSyncSyncCrossingSink { def apply()(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncSyncCrossingSink()) intsink.node } } class IntSyncRationalCrossingSink()(implicit p: Parameters) extends LazyModule { val node = IntSyncSinkNode(1) lazy val module = new Impl class Impl extends LazyModuleImp(this) { def outSize = node.out.headOption.map(_._1.size).getOrElse(0) override def desiredName = s"IntSyncRationalCrossingSink_n${node.out.size}x${outSize}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out := RegNext(in.sync) } } } object IntSyncRationalCrossingSink { def apply()(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncRationalCrossingSink()) intsink.node } } File MixedNode.scala: package org.chipsalliance.diplomacy.nodes import chisel3.{Data, DontCare, Wire} import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.{Field, Parameters} import org.chipsalliance.diplomacy.ValName import org.chipsalliance.diplomacy.sourceLine /** One side metadata of a [[Dangle]]. * * Describes one side of an edge going into or out of a [[BaseNode]]. * * @param serial * the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to. * @param index * the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to. */ case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] { import scala.math.Ordered.orderingToOrdered def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that)) } /** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]] * connects. * * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] , * [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]]. * * @param source * the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within * that [[BaseNode]]. * @param sink * sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that * [[BaseNode]]. 
* @param flipped * flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to * `danglesIn`. * @param dataOpt * actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module */ case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) { def data = dataOpt.get } /** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often * derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually * implement the protocol. */ case class Edges[EI, EO](in: Seq[EI], out: Seq[EO]) /** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */ case object MonitorsEnabled extends Field[Boolean](true) /** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented. * * For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but * [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink * nodes, flipping the rendering of one node's edge will usual produce a more concise visual layout for the * [[LazyModule]]. */ case object RenderFlipped extends Field[Boolean](false) /** The sealed node class in the package, all node are derived from it. * * @param inner * Sink interface implementation. * @param outer * Source interface implementation. * @param valName * val name of this node. * @tparam DI * Downward-flowing parameters received on the inner side of the node. It is usually a brunch of parameters * describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected * [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port * parameters. * @tparam UI * Upward-flowing parameters generated by the inner side of the node. It is usually a brunch of parameters describing * the protocol parameters of a sink. For an [[InwardNode]], it is determined itself. * @tparam EI * Edge Parameters describing a connection on the inner side of the node. It is usually a brunch of transfers * specified for a sink according to protocol. * @tparam BI * Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface. * It should extends from [[chisel3.Data]], which represents the real hardware. * @tparam DO * Downward-flowing parameters generated on the outer side of the node. It is usually a brunch of parameters * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined itself. * @tparam UO * Upward-flowing parameters received by the outer side of the node. It is usually a brunch of parameters describing * the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]]. * Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters. * @tparam EO * Edge Parameters describing a connection on the outer side of the node. It is usually a brunch of transfers * specified for a source according to protocol. * @tparam BO * Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source * interface. It should extends from [[chisel3.Data]], which represents the real hardware. 
* * @note * Call Graph of [[MixedNode]] * - line `─`: source is process by a function and generate pass to others * - Arrow `→`: target of arrow is generated by source * * {{{ * (from the other node) * ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐ * ↓ │ * (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │ * [[InwardNode.accPI]] │ │ │ * │ │ (based on protocol) │ * │ │ [[MixedNode.inner.edgeI]] │ * │ │ ↓ │ * ↓ │ │ │ * (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │ * [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │ * │ │ ↑ │ │ │ * │ │ │ [[OutwardNode.doParams]] │ │ * │ │ │ (from the other node) │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * │ │ │ └────────┬──────────────┤ │ * │ │ │ │ │ │ * │ │ │ │ (based on protocol) │ * │ │ │ │ [[MixedNode.inner.edgeI]] │ * │ │ │ │ │ │ * │ │ (from the other node) │ ↓ │ * │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │ * │ ↑ ↑ │ │ ↓ │ * │ │ │ │ │ [[MixedNode.in]] │ * │ │ │ │ ↓ ↑ │ * │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │ * ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │ * │ │ │ [[MixedNode.bundleOut]]─┐ │ │ * │ │ │ ↑ ↓ │ │ * │ │ │ │ [[MixedNode.out]] │ │ * │ ↓ ↓ │ ↑ │ │ * │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │ * │ │ (from the other node) ↑ │ │ * │ │ │ │ │ │ * │ │ │ [[MixedNode.outer.edgeO]] │ │ * │ │ │ (based on protocol) │ │ * │ │ │ │ │ │ * │ │ │ ┌────────────────────────────────────────┤ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * (immobilize after elaboration)│ ↓ │ │ │ │ * [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │ * ↑ (inward port from [[OutwardNode]]) │ │ │ │ * │ ┌─────────────────────────────────────────┤ │ │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * [[OutwardNode.accPO]] │ ↓ │ │ │ * (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │ * │ ↑ │ │ * │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │ * └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘ * }}} */ abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data]( val inner: InwardNodeImp[DI, UI, EI, BI], val outer: OutwardNodeImp[DO, UO, EO, BO] )( implicit valName: ValName) extends BaseNode with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO] with InwardNode[DI, UI, BI] with OutwardNode[DO, UO, BO] { // Generate a [[NodeHandle]] with inward and outward node are both this node. val inward = this val outward = this /** Debug info of nodes binding. */ def bindingInfo: String = s"""$iBindingInfo |$oBindingInfo |""".stripMargin /** Debug info of ports connecting. */ def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}] |${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}] |""".stripMargin /** Debug info of parameters propagations. 
*/ def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}] |${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}] |${diParams.size} downstream inward parameters: [${diParams.mkString(",")}] |${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}] |""".stripMargin /** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and * [[MixedNode.iPortMapping]]. * * Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward * stars and outward stars. * * This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type * of node. * * @param iKnown * Number of known-size ([[BIND_ONCE]]) input bindings. * @param oKnown * Number of known-size ([[BIND_ONCE]]) output bindings. * @param iStar * Number of unknown size ([[BIND_STAR]]) input bindings. * @param oStar * Number of unknown size ([[BIND_STAR]]) output bindings. * @return * A Tuple of the resolved number of input and output connections. */ protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) /** Function to generate downward-flowing outward params from the downward-flowing input params and the current output * ports. * * @param n * The size of the output sequence to generate. * @param p * Sequence of downward-flowing input parameters of this node. * @return * A `n`-sized sequence of downward-flowing output edge parameters. */ protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]]. * * @param n * Size of the output sequence. * @param p * Upward-flowing output edge parameters. * @return * A n-sized sequence of upward-flowing input edge parameters. */ protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] /** @return * The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with * [[BIND_STAR]]. */ protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR) /** @return * The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of * output bindings bound with [[BIND_STAR]]. */ protected[diplomacy] lazy val sourceCard: Int = iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR) /** @return list of nodes involved in flex bindings with this node. */ protected[diplomacy] lazy val flexes: Seq[BaseNode] = oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2) /** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin * greedily taking up the remaining connections. * * @return * A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return * value is not relevant. */ protected[diplomacy] lazy val flexOffset: Int = { /** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex * operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a * connection that is not a flex and can set the direction for us. 
Otherwise, recurse by visiting the `flexes` of * each node in the current set and decide whether they should be added to the set or not. * * @return * the mapping of [[BaseNode]] indexed by their serial numbers. */ def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = { if (visited.contains(v.serial) || !v.flexibleArityDirection) { visited } else { v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum)) } } /** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node. * * @example * {{{ * a :*=* b :*=* c * d :*=* b * e :*=* f * }}} * * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)` */ val flexSet = DFS(this, Map()).values /** The total number of :*= operators where we're on the left. */ val allSink = flexSet.map(_.sinkCard).sum /** The total number of :=* operators used when we're on the right. */ val allSource = flexSet.map(_.sourceCard).sum require( allSink == 0 || allSource == 0, s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction." ) allSink - allSource } /** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */ protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = { if (flexibleArityDirection) flexOffset else if (n.flexibleArityDirection) n.flexOffset else 0 } /** For a node which is connected between two nodes, select the one that will influence the direction of the flex * resolution. */ protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = { val dir = edgeArityDirection(n) if (dir < 0) l else if (dir > 0) r else 1 } /** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */ private var starCycleGuard = false /** Resolve all the star operators into concrete indicies. As connections are being made, some may be "star" * connections which need to be resolved. In some way to determine how many actual edges they correspond to. We also * need to build up the ranges of edges which correspond to each binding operator, so that We can apply the correct * edge parameters and later build up correct bundle connections. * * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding * operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort * (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= * bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N` */ protected[diplomacy] lazy val ( oPortMapping: Seq[(Int, Int)], iPortMapping: Seq[(Int, Int)], oStar: Int, iStar: Int ) = { try { if (starCycleGuard) throw StarCycleException() starCycleGuard = true // For a given node N... 
// Number of foo :=* N // + Number of bar :=* foo :*=* N val oStars = oBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0) } // Number of N :*= foo // + Number of N :*=* foo :*= bar val iStars = iBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0) } // 1 for foo := N // + bar.iStar for bar :*= foo :*=* N // + foo.iStar for foo :*= N // + 0 for foo :=* N val oKnown = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, 0, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => 0 } }.sum // 1 for N := foo // + bar.oStar for N :*=* foo :=* bar // + foo.oStar for N :=* foo // + 0 for N :*= foo val iKnown = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, 0) case BIND_QUERY => n.oStar case BIND_STAR => 0 } }.sum // Resolve star depends on the node subclass to implement the algorithm for this. val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars) // Cumulative list of resolved outward binding range starting points val oSum = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => oStar } }.scanLeft(0)(_ + _) // Cumulative list of resolved inward binding range starting points val iSum = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar) case BIND_QUERY => n.oStar case BIND_STAR => iStar } }.scanLeft(0)(_ + _) // Create ranges for each binding based on the running sums and return // those along with resolved values for the star operations. (oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar) } catch { case c: StarCycleException => throw c.copy(loop = context +: c.loop) } } /** Sequence of inward ports. * * This should be called after all star bindings are resolved. * * Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. * `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this * connection was made in the source code. */ protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oBindings.flatMap { case (i, n, _, p, s) => // for each binding operator in this node, look at what it connects to val (start, end) = n.iPortMapping(i) (start until end).map { j => (j, n, p, s) } } /** Sequence of outward ports. * * This should be called after all star bindings are resolved. * * `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of * outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection * was made in the source code. */ protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iBindings.flatMap { case (i, n, _, p, s) => // query this port index range of this node in the other side of node. 
val (start, end) = n.oPortMapping(i) (start until end).map { j => (j, n, p, s) } } // Ephemeral nodes ( which have non-None iForward/oForward) have in_degree = out_degree // Thus, there must exist an Eulerian path and the below algorithms terminate @scala.annotation.tailrec private def oTrace( tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) ): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.iForward(i) match { case None => (i, n, p, s) case Some((j, m)) => oTrace((j, m, p, s)) } } @scala.annotation.tailrec private def iTrace( tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) ): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.oForward(i) match { case None => (i, n, p, s) case Some((j, m)) => iTrace((j, m, p, s)) } } /** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - Numeric index of this binding in the [[InwardNode]] on the other end. * - [[InwardNode]] on the other end of this binding. * - A view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace) /** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - numeric index of this binding in [[OutwardNode]] on the other end. * - [[OutwardNode]] on the other end of this binding. * - a view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace) private var oParamsCycleGuard = false protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) } protected[diplomacy] lazy val doParams: Seq[DO] = { try { if (oParamsCycleGuard) throw DownwardCycleException() oParamsCycleGuard = true val o = mapParamsD(oPorts.size, diParams) require( o.size == oPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of outward ports should equal the number of produced outward parameters. |$context |$connectedPortsInfo |Downstreamed inward parameters: [${diParams.mkString(",")}] |Produced outward parameters: [${o.mkString(",")}] |""".stripMargin ) o.map(outer.mixO(_, this)) } catch { case c: DownwardCycleException => throw c.copy(loop = context +: c.loop) } } private var iParamsCycleGuard = false protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) } protected[diplomacy] lazy val uiParams: Seq[UI] = { try { if (iParamsCycleGuard) throw UpwardCycleException() iParamsCycleGuard = true val i = mapParamsU(iPorts.size, uoParams) require( i.size == iPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of inward ports should equal the number of produced inward parameters. |$context |$connectedPortsInfo |Upstreamed outward parameters: [${uoParams.mkString(",")}] |Produced inward parameters: [${i.mkString(",")}] |""".stripMargin ) i.map(inner.mixI(_, this)) } catch { case c: UpwardCycleException => throw c.copy(loop = context +: c.loop) } } /** Outward edge parameters. 
*/ protected[diplomacy] lazy val edgesOut: Seq[EO] = (oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) } /** Inward edge parameters. */ protected[diplomacy] lazy val edgesIn: Seq[EI] = (iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) } /** A tuple of the input edge parameters and output edge parameters for the edges bound to this node. * * If you need to access to the edges of a foreign Node, use this method (in/out create bundles). */ lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut) /** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */ protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e => val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } /** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */ protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e => val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(serial, i), sink = HalfEdge(n.serial, j), flipped = false, name = wirePrefix + "out", dataOpt = None ) } private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(n.serial, j), sink = HalfEdge(serial, i), flipped = true, name = wirePrefix + "in", dataOpt = None ) } /** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */ protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleOut(i))) } /** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */ protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleIn(i))) } private[diplomacy] var instantiated = false /** Gather Bundle and edge parameters of outward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def out: Seq[(BO, EO)] = { require( instantiated, s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleOut.zip(edgesOut) } /** Gather Bundle and edge parameters of inward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def in: Seq[(BI, EI)] = { require( instantiated, s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleIn.zip(edgesIn) } /** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires, * instantiate monitors on all input ports if appropriate, and return all the dangles of this node. 
*/ protected[diplomacy] def instantiate(): Seq[Dangle] = { instantiated = true if (!circuitIdentity) { (iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) } } danglesOut ++ danglesIn } protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn /** Connects the outward part of a node with the inward part of this node. */ protected[diplomacy] def bind( h: OutwardNode[DI, UI, BI], binding: NodeBinding )( implicit p: Parameters, sourceInfo: SourceInfo ): Unit = { val x = this // x := y val y = h sourceLine(sourceInfo, " at ", "") val i = x.iPushed val o = y.oPushed y.oPush( i, x, binding match { case BIND_ONCE => BIND_ONCE case BIND_FLEX => BIND_FLEX case BIND_STAR => BIND_QUERY case BIND_QUERY => BIND_STAR } ) x.iPush(o, y, binding) } /* Metadata for printing the node graph. */ def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) => val re = inner.render(e) (n, re.copy(flipped = re.flipped != p(RenderFlipped))) } /** Metadata for printing the node graph */ def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) } } File AsyncResetReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ /** This black-boxes an Async Reset * (or Set) * Register. * * Because Chisel doesn't support * parameterized black boxes, * we unfortunately have to * instantiate a number of these. * * We also have to hard-code the set/ * reset behavior. * * Do not confuse an asynchronous * reset signal with an asynchronously * reset reg. You should still * properly synchronize your reset * deassertion. * * @param d Data input * @param q Data Output * @param clk Clock Input * @param rst Reset Input * @param en Write Enable Input * */ class AsyncResetReg(resetValue: Int = 0) extends RawModule { val io = IO(new Bundle { val d = Input(Bool()) val q = Output(Bool()) val en = Input(Bool()) val clk = Input(Clock()) val rst = Input(Reset()) }) val reg = withClockAndReset(io.clk, io.rst.asAsyncReset)(RegInit(resetValue.U(1.W))) when (io.en) { reg := io.d } io.q := reg } class SimpleRegIO(val w: Int) extends Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) } class AsyncResetRegVec(val w: Int, val init: BigInt) extends Module { override def desiredName = s"AsyncResetRegVec_w${w}_i${init}" val io = IO(new SimpleRegIO(w)) val reg = withReset(reset.asAsyncReset)(RegInit(init.U(w.W))) when (io.en) { reg := io.d } io.q := reg } object AsyncResetReg { // Create Single Registers def apply(d: Bool, clk: Clock, rst: Bool, init: Boolean, name: Option[String]): Bool = { val reg = Module(new AsyncResetReg(if (init) 1 else 0)) reg.io.d := d reg.io.clk := clk reg.io.rst := rst reg.io.en := true.B name.foreach(reg.suggestName(_)) reg.io.q } def apply(d: Bool, clk: Clock, rst: Bool): Bool = apply(d, clk, rst, false, None) def apply(d: Bool, clk: Clock, rst: Bool, name: String): Bool = apply(d, clk, rst, false, Some(name)) // Create Vectors of Registers def apply(updateData: UInt, resetData: BigInt, enable: Bool, name: Option[String] = None): UInt = { val w = updateData.getWidth max resetData.bitLength val reg = Module(new AsyncResetRegVec(w, resetData)) name.foreach(reg.suggestName(_)) reg.io.d := updateData reg.io.en := enable reg.io.q } def apply(updateData: UInt, resetData: BigInt, enable: Bool, name: String): UInt = apply(updateData, resetData, enable, Some(name)) def 
apply(updateData: UInt, resetData: BigInt): UInt = apply(updateData, resetData, enable = true.B) def apply(updateData: UInt, resetData: BigInt, name: String): UInt = apply(updateData, resetData, enable = true.B, Some(name)) def apply(updateData: UInt, enable: Bool): UInt = apply(updateData, resetData=BigInt(0), enable) def apply(updateData: UInt, enable: Bool, name: String): UInt = apply(updateData, resetData = BigInt(0), enable, Some(name)) def apply(updateData: UInt): UInt = apply(updateData, resetData = BigInt(0), enable = true.B) def apply(updateData: UInt, name:String): UInt = apply(updateData, resetData = BigInt(0), enable = true.B, Some(name)) }
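For illustration only, a minimal sketch of driving the AsyncResetReg factory above from user code (the module AsyncCounter, its ports, and the register name are invented; it assumes freechips.rocketchip.util is on the classpath):

import chisel3._
import freechips.rocketchip.util.AsyncResetReg

// Hypothetical 8-bit counter held in an asynchronously reset register vector,
// using the (updateData, resetData, enable, name) overload defined above.
class AsyncCounter extends Module {
  val io = IO(new Bundle {
    val en    = Input(Bool())
    val count = Output(UInt(8.W))
  })
  val next = Wire(UInt(8.W))
  // Register width is max(next.getWidth, resetData.bitLength) = 8; resets to 0.
  io.count := AsyncResetReg(next, BigInt(0), io.en, "count_reg")
  next := io.count + 1.U
}

Under an asserted asynchronous reset the counter snaps to 0 immediately; io.en gates whether it advances each cycle.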
module IntSyncCrossingSource_n1x1_23( // @[Crossing.scala:41:9] input clock, // @[Crossing.scala:41:9] input reset // @[Crossing.scala:41:9] ); wire auto_in_0 = 1'h0; // @[Crossing.scala:41:9] wire auto_out_sync_0 = 1'h0; // @[Crossing.scala:41:9] wire nodeIn_0 = 1'h0; // @[MixedNode.scala:551:17] wire nodeOut_sync_0 = 1'h0; // @[MixedNode.scala:542:17] AsyncResetRegVec_w1_i0_23 reg_0 ( // @[AsyncResetReg.scala:86:21] .clock (clock), .reset (reset) ); // @[AsyncResetReg.scala:86:21] endmodule
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
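As a usage sketch for the synchronizer factories above (IrqSync and its port names are hypothetical), a single asynchronous level can be re-timed into the local clock domain with a 3-deep, async-reset chain:

import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

// Hypothetical consumer of AsyncResetSynchronizerShiftReg(in, sync, init, name).
class IrqSync extends Module {
  val io = IO(new Bundle {
    val irq_async = Input(Bool())   // launched from a foreign clock domain
    val irq_sync  = Output(Bool())  // safe to sample in this domain
  })
  // sync = 3 stages, init = 0 after asynchronous reset assertion.
  io.irq_sync := AsyncResetSynchronizerShiftReg(io.irq_async, 3, 0, Some("irq_sync_reg"))
}

The generated module that follows is the kind of 3-stage, init-0 primitive such a chain elaborates to for a single bit.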
module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_239( // @[SynchronizerReg.scala:68:19] input clock, // @[SynchronizerReg.scala:68:19] input reset, // @[SynchronizerReg.scala:68:19] input io_d, // @[ShiftReg.scala:36:14] output io_q // @[ShiftReg.scala:36:14] ); wire io_d_0 = io_d; // @[SynchronizerReg.scala:68:19] wire _sync_2_T = io_d_0; // @[SynchronizerReg.scala:54:22, :68:19] wire io_q_0; // @[SynchronizerReg.scala:68:19] reg sync_0; // @[SynchronizerReg.scala:51:87] assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19] reg sync_1; // @[SynchronizerReg.scala:51:87] reg sync_2; // @[SynchronizerReg.scala:51:87] always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19] if (reset) begin // @[SynchronizerReg.scala:68:19] sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87] sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87] sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87] end else begin // @[SynchronizerReg.scala:68:19] sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87] sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87] sync_2 <= _sync_2_T; // @[SynchronizerReg.scala:51:87, :54:22] end end // always @(posedge, posedge) assign io_q = io_q_0; // @[SynchronizerReg.scala:68:19] endmodule
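ClockCrossingReg, also listed above, is the single-deep capture flop rather than a multi-stage synchronizer; a hedged sketch of one way it might be used (CaptureBus and its ports are invented for illustration):

import chisel3._
import freechips.rocketchip.util.ClockCrossingReg

// Hypothetical example: capture a multi-bit payload on a qualified enable
// before handing it to another clock domain (single flop, no reset init).
class CaptureBus extends Module {
  val io = IO(new Bundle {
    val data_in  = Input(UInt(16.W))
    val capture  = Input(Bool())
    val data_out = Output(UInt(16.W))
  })
  io.data_out := ClockCrossingReg(io.data_in, en = io.capture, doInit = false,
                                  name = Some("cdc_capture"))
}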
Generate the Verilog code corresponding to the following Chisel files. File MSHR.scala: /* * Copyright 2019 SiFive, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You should have received a copy of LICENSE.Apache2 along with * this software. If not, you may obtain a copy at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package sifive.blocks.inclusivecache import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import freechips.rocketchip.tilelink._ import TLPermissions._ import TLMessages._ import MetaData._ import chisel3.PrintableHelper import chisel3.experimental.dataview._ class ScheduleRequest(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params) { val a = Valid(new SourceARequest(params)) val b = Valid(new SourceBRequest(params)) val c = Valid(new SourceCRequest(params)) val d = Valid(new SourceDRequest(params)) val e = Valid(new SourceERequest(params)) val x = Valid(new SourceXRequest(params)) val dir = Valid(new DirectoryWrite(params)) val reload = Bool() // get next request via allocate (if any) } class MSHRStatus(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params) { val set = UInt(params.setBits.W) val tag = UInt(params.tagBits.W) val way = UInt(params.wayBits.W) val blockB = Bool() val nestB = Bool() val blockC = Bool() val nestC = Bool() } class NestedWriteback(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params) { val set = UInt(params.setBits.W) val tag = UInt(params.tagBits.W) val b_toN = Bool() // nested Probes may unhit us val b_toB = Bool() // nested Probes may demote us val b_clr_dirty = Bool() // nested Probes clear dirty val c_set_dirty = Bool() // nested Releases MAY set dirty } sealed trait CacheState { val code = CacheState.index.U CacheState.index = CacheState.index + 1 } object CacheState { var index = 0 } case object S_INVALID extends CacheState case object S_BRANCH extends CacheState case object S_BRANCH_C extends CacheState case object S_TIP extends CacheState case object S_TIP_C extends CacheState case object S_TIP_CD extends CacheState case object S_TIP_D extends CacheState case object S_TRUNK_C extends CacheState case object S_TRUNK_CD extends CacheState class MSHR(params: InclusiveCacheParameters) extends Module { val io = IO(new Bundle { val allocate = Flipped(Valid(new AllocateRequest(params))) // refills MSHR for next cycle val directory = Flipped(Valid(new DirectoryResult(params))) // triggers schedule setup val status = Valid(new MSHRStatus(params)) val schedule = Decoupled(new ScheduleRequest(params)) val sinkc = Flipped(Valid(new SinkCResponse(params))) val sinkd = Flipped(Valid(new SinkDResponse(params))) val sinke = Flipped(Valid(new SinkEResponse(params))) val nestedwb = Flipped(new NestedWriteback(params)) }) val request_valid = RegInit(false.B) val request = Reg(new FullRequest(params)) val meta_valid = RegInit(false.B) val meta = Reg(new DirectoryResult(params)) // Define which states are valid when (meta_valid) { when (meta.state === INVALID) { assert (!meta.clients.orR) assert (!meta.dirty) } when (meta.state === BRANCH) { assert (!meta.dirty) } when 
(meta.state === TRUNK) { assert (meta.clients.orR) assert ((meta.clients & (meta.clients - 1.U)) === 0.U) // at most one } when (meta.state === TIP) { // noop } } // Completed transitions (s_ = scheduled), (w_ = waiting) val s_rprobe = RegInit(true.B) // B val w_rprobeackfirst = RegInit(true.B) val w_rprobeacklast = RegInit(true.B) val s_release = RegInit(true.B) // CW w_rprobeackfirst val w_releaseack = RegInit(true.B) val s_pprobe = RegInit(true.B) // B val s_acquire = RegInit(true.B) // A s_release, s_pprobe [1] val s_flush = RegInit(true.B) // X w_releaseack val w_grantfirst = RegInit(true.B) val w_grantlast = RegInit(true.B) val w_grant = RegInit(true.B) // first | last depending on wormhole val w_pprobeackfirst = RegInit(true.B) val w_pprobeacklast = RegInit(true.B) val w_pprobeack = RegInit(true.B) // first | last depending on wormhole val s_probeack = RegInit(true.B) // C w_pprobeackfirst (mutually exclusive with next two s_*) val s_grantack = RegInit(true.B) // E w_grantfirst ... CAN require both outE&inD to service outD val s_execute = RegInit(true.B) // D w_pprobeack, w_grant val w_grantack = RegInit(true.B) val s_writeback = RegInit(true.B) // W w_* // [1]: We cannot issue outer Acquire while holding blockB (=> outA can stall) // However, inB and outC are higher priority than outB, so s_release and s_pprobe // may be safely issued while blockB. Thus we must NOT try to schedule the // potentially stuck s_acquire with either of them (scheduler is all or none). // Meta-data that we discover underway val sink = Reg(UInt(params.outer.bundle.sinkBits.W)) val gotT = Reg(Bool()) val bad_grant = Reg(Bool()) val probes_done = Reg(UInt(params.clientBits.W)) val probes_toN = Reg(UInt(params.clientBits.W)) val probes_noT = Reg(Bool()) // When a nested transaction completes, update our meta data when (meta_valid && meta.state =/= INVALID && io.nestedwb.set === request.set && io.nestedwb.tag === meta.tag) { when (io.nestedwb.b_clr_dirty) { meta.dirty := false.B } when (io.nestedwb.c_set_dirty) { meta.dirty := true.B } when (io.nestedwb.b_toB) { meta.state := BRANCH } when (io.nestedwb.b_toN) { meta.hit := false.B } } // Scheduler status io.status.valid := request_valid io.status.bits.set := request.set io.status.bits.tag := request.tag io.status.bits.way := meta.way io.status.bits.blockB := !meta_valid || ((!w_releaseack || !w_rprobeacklast || !w_pprobeacklast) && !w_grantfirst) io.status.bits.nestB := meta_valid && w_releaseack && w_rprobeacklast && w_pprobeacklast && !w_grantfirst // The above rules ensure we will block and not nest an outer probe while still doing our // own inner probes. Thus every probe wakes exactly one MSHR. io.status.bits.blockC := !meta_valid io.status.bits.nestC := meta_valid && (!w_rprobeackfirst || !w_pprobeackfirst || !w_grantfirst) // The w_grantfirst in nestC is necessary to deal with: // acquire waiting for grant, inner release gets queued, outer probe -> inner probe -> deadlock // ... 
this is possible because the release+probe can be for same set, but different tag // We can only demand: block, nest, or queue assert (!io.status.bits.nestB || !io.status.bits.blockB) assert (!io.status.bits.nestC || !io.status.bits.blockC) // Scheduler requests val no_wait = w_rprobeacklast && w_releaseack && w_grantlast && w_pprobeacklast && w_grantack io.schedule.bits.a.valid := !s_acquire && s_release && s_pprobe io.schedule.bits.b.valid := !s_rprobe || !s_pprobe io.schedule.bits.c.valid := (!s_release && w_rprobeackfirst) || (!s_probeack && w_pprobeackfirst) io.schedule.bits.d.valid := !s_execute && w_pprobeack && w_grant io.schedule.bits.e.valid := !s_grantack && w_grantfirst io.schedule.bits.x.valid := !s_flush && w_releaseack io.schedule.bits.dir.valid := (!s_release && w_rprobeackfirst) || (!s_writeback && no_wait) io.schedule.bits.reload := no_wait io.schedule.valid := io.schedule.bits.a.valid || io.schedule.bits.b.valid || io.schedule.bits.c.valid || io.schedule.bits.d.valid || io.schedule.bits.e.valid || io.schedule.bits.x.valid || io.schedule.bits.dir.valid // Schedule completions when (io.schedule.ready) { s_rprobe := true.B when (w_rprobeackfirst) { s_release := true.B } s_pprobe := true.B when (s_release && s_pprobe) { s_acquire := true.B } when (w_releaseack) { s_flush := true.B } when (w_pprobeackfirst) { s_probeack := true.B } when (w_grantfirst) { s_grantack := true.B } when (w_pprobeack && w_grant) { s_execute := true.B } when (no_wait) { s_writeback := true.B } // Await the next operation when (no_wait) { request_valid := false.B meta_valid := false.B } } // Resulting meta-data val final_meta_writeback = WireInit(meta) val req_clientBit = params.clientBit(request.source) val req_needT = needT(request.opcode, request.param) val req_acquire = request.opcode === AcquireBlock || request.opcode === AcquirePerm val meta_no_clients = !meta.clients.orR val req_promoteT = req_acquire && Mux(meta.hit, meta_no_clients && meta.state === TIP, gotT) when (request.prio(2) && (!params.firstLevel).B) { // always a hit final_meta_writeback.dirty := meta.dirty || request.opcode(0) final_meta_writeback.state := Mux(request.param =/= TtoT && meta.state === TRUNK, TIP, meta.state) final_meta_writeback.clients := meta.clients & ~Mux(isToN(request.param), req_clientBit, 0.U) final_meta_writeback.hit := true.B // chained requests are hits } .elsewhen (request.control && params.control.B) { // request.prio(0) when (meta.hit) { final_meta_writeback.dirty := false.B final_meta_writeback.state := INVALID final_meta_writeback.clients := meta.clients & ~probes_toN } final_meta_writeback.hit := false.B } .otherwise { final_meta_writeback.dirty := (meta.hit && meta.dirty) || !request.opcode(2) final_meta_writeback.state := Mux(req_needT, Mux(req_acquire, TRUNK, TIP), Mux(!meta.hit, Mux(gotT, Mux(req_acquire, TRUNK, TIP), BRANCH), MuxLookup(meta.state, 0.U(2.W))(Seq( INVALID -> BRANCH, BRANCH -> BRANCH, TRUNK -> TIP, TIP -> Mux(meta_no_clients && req_acquire, TRUNK, TIP))))) final_meta_writeback.clients := Mux(meta.hit, meta.clients & ~probes_toN, 0.U) | Mux(req_acquire, req_clientBit, 0.U) final_meta_writeback.tag := request.tag final_meta_writeback.hit := true.B } when (bad_grant) { when (meta.hit) { // upgrade failed (B -> T) assert (!meta_valid || meta.state === BRANCH) final_meta_writeback.hit := true.B final_meta_writeback.dirty := false.B final_meta_writeback.state := BRANCH final_meta_writeback.clients := meta.clients & ~probes_toN } .otherwise { // failed N -> (T or B) 
final_meta_writeback.hit := false.B final_meta_writeback.dirty := false.B final_meta_writeback.state := INVALID final_meta_writeback.clients := 0.U } } val invalid = Wire(new DirectoryEntry(params)) invalid.dirty := false.B invalid.state := INVALID invalid.clients := 0.U invalid.tag := 0.U // Just because a client says BtoT, by the time we process the request he may be N. // Therefore, we must consult our own meta-data state to confirm he owns the line still. val honour_BtoT = meta.hit && (meta.clients & req_clientBit).orR // The client asking us to act is proof they don't have permissions. val excluded_client = Mux(meta.hit && request.prio(0) && skipProbeN(request.opcode, params.cache.hintsSkipProbe), req_clientBit, 0.U) io.schedule.bits.a.bits.tag := request.tag io.schedule.bits.a.bits.set := request.set io.schedule.bits.a.bits.param := Mux(req_needT, Mux(meta.hit, BtoT, NtoT), NtoB) io.schedule.bits.a.bits.block := request.size =/= log2Ceil(params.cache.blockBytes).U || !(request.opcode === PutFullData || request.opcode === AcquirePerm) io.schedule.bits.a.bits.source := 0.U io.schedule.bits.b.bits.param := Mux(!s_rprobe, toN, Mux(request.prio(1), request.param, Mux(req_needT, toN, toB))) io.schedule.bits.b.bits.tag := Mux(!s_rprobe, meta.tag, request.tag) io.schedule.bits.b.bits.set := request.set io.schedule.bits.b.bits.clients := meta.clients & ~excluded_client io.schedule.bits.c.bits.opcode := Mux(meta.dirty, ReleaseData, Release) io.schedule.bits.c.bits.param := Mux(meta.state === BRANCH, BtoN, TtoN) io.schedule.bits.c.bits.source := 0.U io.schedule.bits.c.bits.tag := meta.tag io.schedule.bits.c.bits.set := request.set io.schedule.bits.c.bits.way := meta.way io.schedule.bits.c.bits.dirty := meta.dirty io.schedule.bits.d.bits.viewAsSupertype(chiselTypeOf(request)) := request io.schedule.bits.d.bits.param := Mux(!req_acquire, request.param, MuxLookup(request.param, request.param)(Seq( NtoB -> Mux(req_promoteT, NtoT, NtoB), BtoT -> Mux(honour_BtoT, BtoT, NtoT), NtoT -> NtoT))) io.schedule.bits.d.bits.sink := 0.U io.schedule.bits.d.bits.way := meta.way io.schedule.bits.d.bits.bad := bad_grant io.schedule.bits.e.bits.sink := sink io.schedule.bits.x.bits.fail := false.B io.schedule.bits.dir.bits.set := request.set io.schedule.bits.dir.bits.way := meta.way io.schedule.bits.dir.bits.data := Mux(!s_release, invalid, WireInit(new DirectoryEntry(params), init = final_meta_writeback)) // Coverage of state transitions def cacheState(entry: DirectoryEntry, hit: Bool) = { val out = WireDefault(0.U) val c = entry.clients.orR val d = entry.dirty switch (entry.state) { is (BRANCH) { out := Mux(c, S_BRANCH_C.code, S_BRANCH.code) } is (TRUNK) { out := Mux(d, S_TRUNK_CD.code, S_TRUNK_C.code) } is (TIP) { out := Mux(c, Mux(d, S_TIP_CD.code, S_TIP_C.code), Mux(d, S_TIP_D.code, S_TIP.code)) } is (INVALID) { out := S_INVALID.code } } when (!hit) { out := S_INVALID.code } out } val p = !params.lastLevel // can be probed val c = !params.firstLevel // can be acquired val m = params.inner.client.clients.exists(!_.supports.probe) // can be written (or read) val r = params.outer.manager.managers.exists(!_.alwaysGrantsT) // read-only devices exist val f = params.control // flush control register exists val cfg = (p, c, m, r, f) val b = r || p // can reach branch state (via probe downgrade or read-only device) // The cache must be used for something or we would not be here require(c || m) val evict = cacheState(meta, !meta.hit) val before = cacheState(meta, meta.hit) val after = cacheState(final_meta_writeback, 
true.B) def eviction(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) { if (cover) { params.ccover(evict === from.code, s"MSHR_${from}_EVICT", s"State transition from ${from} to evicted ${cfg}") } else { assert(!(evict === from.code), cf"State transition from ${from} to evicted should be impossible ${cfg}") } if (cover && f) { params.ccover(before === from.code, s"MSHR_${from}_FLUSH", s"State transition from ${from} to flushed ${cfg}") } else { assert(!(before === from.code), cf"State transition from ${from} to flushed should be impossible ${cfg}") } } def transition(from: CacheState, to: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) { if (cover) { params.ccover(before === from.code && after === to.code, s"MSHR_${from}_${to}", s"State transition from ${from} to ${to} ${cfg}") } else { assert(!(before === from.code && after === to.code), cf"State transition from ${from} to ${to} should be impossible ${cfg}") } } when ((!s_release && w_rprobeackfirst) && io.schedule.ready) { eviction(S_BRANCH, b) // MMIO read to read-only device eviction(S_BRANCH_C, b && c) // you need children to become C eviction(S_TIP, true) // MMIO read || clean release can lead to this state eviction(S_TIP_C, c) // needs two clients || client + mmio || downgrading client eviction(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client eviction(S_TIP_D, true) // MMIO write || dirty release lead here eviction(S_TRUNK_C, c) // acquire for write eviction(S_TRUNK_CD, c) // dirty release then reacquire } when ((!s_writeback && no_wait) && io.schedule.ready) { transition(S_INVALID, S_BRANCH, b && m) // only MMIO can bring us to BRANCH state transition(S_INVALID, S_BRANCH_C, b && c) // C state is only possible if there are inner caches transition(S_INVALID, S_TIP, m) // MMIO read transition(S_INVALID, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_INVALID, S_TIP_CD, false) // acquire does not cause dirty immediately transition(S_INVALID, S_TIP_D, m) // MMIO write transition(S_INVALID, S_TRUNK_C, c) // acquire transition(S_INVALID, S_TRUNK_CD, false) // acquire does not cause dirty immediately transition(S_BRANCH, S_INVALID, b && p) // probe can do this (flushes run as evictions) transition(S_BRANCH, S_BRANCH_C, b && c) // acquire transition(S_BRANCH, S_TIP, b && m) // prefetch write transition(S_BRANCH, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_BRANCH, S_TIP_CD, false) // acquire does not cause dirty immediately transition(S_BRANCH, S_TIP_D, b && m) // MMIO write transition(S_BRANCH, S_TRUNK_C, b && c) // acquire transition(S_BRANCH, S_TRUNK_CD, false) // acquire does not cause dirty immediately transition(S_BRANCH_C, S_INVALID, b && c && p) transition(S_BRANCH_C, S_BRANCH, b && c) // clean release (optional) transition(S_BRANCH_C, S_TIP, b && c && m) // prefetch write transition(S_BRANCH_C, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_BRANCH_C, S_TIP_D, b && c && m) // MMIO write transition(S_BRANCH_C, S_TIP_CD, false) // going dirty means we must shoot down clients transition(S_BRANCH_C, S_TRUNK_C, b && c) // acquire transition(S_BRANCH_C, S_TRUNK_CD, false) // acquire does not cause dirty immediately transition(S_TIP, S_INVALID, p) transition(S_TIP, S_BRANCH, p) // losing TIP only possible via probe transition(S_TIP, S_BRANCH_C, false) // we would go S_TRUNK_C instead transition(S_TIP, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_TIP, S_TIP_D, m) // direct dirty only via MMIO write transition(S_TIP, 
S_TIP_CD, false) // acquire does not make us dirty immediately transition(S_TIP, S_TRUNK_C, c) // acquire transition(S_TIP, S_TRUNK_CD, false) // acquire does not make us dirty immediately transition(S_TIP_C, S_INVALID, c && p) transition(S_TIP_C, S_BRANCH, c && p) // losing TIP only possible via probe transition(S_TIP_C, S_BRANCH_C, c && p) // losing TIP only possible via probe transition(S_TIP_C, S_TIP, c) // probed while MMIO read || clean release (optional) transition(S_TIP_C, S_TIP_D, c && m) // direct dirty only via MMIO write transition(S_TIP_C, S_TIP_CD, false) // going dirty means we must shoot down clients transition(S_TIP_C, S_TRUNK_C, c) // acquire transition(S_TIP_C, S_TRUNK_CD, false) // acquire does not make us immediately dirty transition(S_TIP_D, S_INVALID, p) transition(S_TIP_D, S_BRANCH, p) // losing D is only possible via probe transition(S_TIP_D, S_BRANCH_C, p && c) // probed while acquire shared transition(S_TIP_D, S_TIP, p) // probed while MMIO read || outer probe.toT (optional) transition(S_TIP_D, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_TIP_D, S_TIP_CD, false) // we would go S_TRUNK_CD instead transition(S_TIP_D, S_TRUNK_C, p && c) // probed while acquired transition(S_TIP_D, S_TRUNK_CD, c) // acquire transition(S_TIP_CD, S_INVALID, c && p) transition(S_TIP_CD, S_BRANCH, c && p) // losing D is only possible via probe transition(S_TIP_CD, S_BRANCH_C, c && p) // losing D is only possible via probe transition(S_TIP_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional) transition(S_TIP_CD, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_TIP_CD, S_TIP_D, c) // MMIO write || clean release (optional) transition(S_TIP_CD, S_TRUNK_C, c && p) // probed while acquire transition(S_TIP_CD, S_TRUNK_CD, c) // acquire transition(S_TRUNK_C, S_INVALID, c && p) transition(S_TRUNK_C, S_BRANCH, c && p) // losing TIP only possible via probe transition(S_TRUNK_C, S_BRANCH_C, c && p) // losing TIP only possible via probe transition(S_TRUNK_C, S_TIP, c) // MMIO read || clean release (optional) transition(S_TRUNK_C, S_TIP_C, c) // bounce shared transition(S_TRUNK_C, S_TIP_D, c) // dirty release transition(S_TRUNK_C, S_TIP_CD, c) // dirty bounce shared transition(S_TRUNK_C, S_TRUNK_CD, c) // dirty bounce transition(S_TRUNK_CD, S_INVALID, c && p) transition(S_TRUNK_CD, S_BRANCH, c && p) // losing D only possible via probe transition(S_TRUNK_CD, S_BRANCH_C, c && p) // losing D only possible via probe transition(S_TRUNK_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional) transition(S_TRUNK_CD, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_TRUNK_CD, S_TIP_D, c) // dirty release transition(S_TRUNK_CD, S_TIP_CD, c) // bounce shared transition(S_TRUNK_CD, S_TRUNK_C, c && p) // probed while acquire } // Handle response messages val probe_bit = params.clientBit(io.sinkc.bits.source) val last_probe = (probes_done | probe_bit) === (meta.clients & ~excluded_client) val probe_toN = isToN(io.sinkc.bits.param) if (!params.firstLevel) when (io.sinkc.valid) { params.ccover( probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_FULL", "Client downgraded to N when asked only to do B") params.ccover(!probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_HALF", "Client downgraded to B when asked only to do B") // Caution: the probe matches us only in set. // We would never allow an outer probe to nest until both w_[rp]probeack complete, so // it is safe to just unguardedly update the probe FSM. 
probes_done := probes_done | probe_bit probes_toN := probes_toN | Mux(probe_toN, probe_bit, 0.U) probes_noT := probes_noT || io.sinkc.bits.param =/= TtoT w_rprobeackfirst := w_rprobeackfirst || last_probe w_rprobeacklast := w_rprobeacklast || (last_probe && io.sinkc.bits.last) w_pprobeackfirst := w_pprobeackfirst || last_probe w_pprobeacklast := w_pprobeacklast || (last_probe && io.sinkc.bits.last) // Allow wormhole routing from sinkC if the first request beat has offset 0 val set_pprobeack = last_probe && (io.sinkc.bits.last || request.offset === 0.U) w_pprobeack := w_pprobeack || set_pprobeack params.ccover(!set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_SERIAL", "Sequential routing of probe response data") params.ccover( set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_WORMHOLE", "Wormhole routing of probe response data") // However, meta-data updates need to be done more cautiously when (meta.state =/= INVALID && io.sinkc.bits.tag === meta.tag && io.sinkc.bits.data) { meta.dirty := true.B } // !!! } when (io.sinkd.valid) { when (io.sinkd.bits.opcode === Grant || io.sinkd.bits.opcode === GrantData) { sink := io.sinkd.bits.sink w_grantfirst := true.B w_grantlast := io.sinkd.bits.last // Record if we need to prevent taking ownership bad_grant := io.sinkd.bits.denied // Allow wormhole routing for requests whose first beat has offset 0 w_grant := request.offset === 0.U || io.sinkd.bits.last params.ccover(io.sinkd.bits.opcode === GrantData && request.offset === 0.U, "MSHR_GRANT_WORMHOLE", "Wormhole routing of grant response data") params.ccover(io.sinkd.bits.opcode === GrantData && request.offset =/= 0.U, "MSHR_GRANT_SERIAL", "Sequential routing of grant response data") gotT := io.sinkd.bits.param === toT } .elsewhen (io.sinkd.bits.opcode === ReleaseAck) { w_releaseack := true.B } } when (io.sinke.valid) { w_grantack := true.B } // Bootstrap new requests val allocate_as_full = WireInit(new FullRequest(params), init = io.allocate.bits) val new_meta = Mux(io.allocate.valid && io.allocate.bits.repeat, final_meta_writeback, io.directory.bits) val new_request = Mux(io.allocate.valid, allocate_as_full, request) val new_needT = needT(new_request.opcode, new_request.param) val new_clientBit = params.clientBit(new_request.source) val new_skipProbe = Mux(skipProbeN(new_request.opcode, params.cache.hintsSkipProbe), new_clientBit, 0.U) val prior = cacheState(final_meta_writeback, true.B) def bypass(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) { if (cover) { params.ccover(prior === from.code, s"MSHR_${from}_BYPASS", s"State bypass transition from ${from} ${cfg}") } else { assert(!(prior === from.code), cf"State bypass from ${from} should be impossible ${cfg}") } } when (io.allocate.valid && io.allocate.bits.repeat) { bypass(S_INVALID, f || p) // Can lose permissions (probe/flush) bypass(S_BRANCH, b) // MMIO read to read-only device bypass(S_BRANCH_C, b && c) // you need children to become C bypass(S_TIP, true) // MMIO read || clean release can lead to this state bypass(S_TIP_C, c) // needs two clients || client + mmio || downgrading client bypass(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client bypass(S_TIP_D, true) // MMIO write || dirty release lead here bypass(S_TRUNK_C, c) // acquire for write bypass(S_TRUNK_CD, c) // dirty release then reacquire } when (io.allocate.valid) { assert (!request_valid || (no_wait && io.schedule.fire)) request_valid := true.B request := io.allocate.bits } // Create execution plan when (io.directory.valid || (io.allocate.valid && 
io.allocate.bits.repeat)) { meta_valid := true.B meta := new_meta probes_done := 0.U probes_toN := 0.U probes_noT := false.B gotT := false.B bad_grant := false.B // These should already be either true or turning true // We clear them here explicitly to simplify the mux tree s_rprobe := true.B w_rprobeackfirst := true.B w_rprobeacklast := true.B s_release := true.B w_releaseack := true.B s_pprobe := true.B s_acquire := true.B s_flush := true.B w_grantfirst := true.B w_grantlast := true.B w_grant := true.B w_pprobeackfirst := true.B w_pprobeacklast := true.B w_pprobeack := true.B s_probeack := true.B s_grantack := true.B s_execute := true.B w_grantack := true.B s_writeback := true.B // For C channel requests (ie: Release[Data]) when (new_request.prio(2) && (!params.firstLevel).B) { s_execute := false.B // Do we need to go dirty? when (new_request.opcode(0) && !new_meta.dirty) { s_writeback := false.B } // Does our state change? when (isToB(new_request.param) && new_meta.state === TRUNK) { s_writeback := false.B } // Do our clients change? when (isToN(new_request.param) && (new_meta.clients & new_clientBit) =/= 0.U) { s_writeback := false.B } assert (new_meta.hit) } // For X channel requests (ie: flush) .elsewhen (new_request.control && params.control.B) { // new_request.prio(0) s_flush := false.B // Do we need to actually do something? when (new_meta.hit) { s_release := false.B w_releaseack := false.B // Do we need to shoot-down inner caches? when ((!params.firstLevel).B && (new_meta.clients =/= 0.U)) { s_rprobe := false.B w_rprobeackfirst := false.B w_rprobeacklast := false.B } } } // For A channel requests .otherwise { // new_request.prio(0) && !new_request.control s_execute := false.B // Do we need an eviction? when (!new_meta.hit && new_meta.state =/= INVALID) { s_release := false.B w_releaseack := false.B // Do we need to shoot-down inner caches? when ((!params.firstLevel).B & (new_meta.clients =/= 0.U)) { s_rprobe := false.B w_rprobeackfirst := false.B w_rprobeacklast := false.B } } // Do we need an acquire? when (!new_meta.hit || (new_meta.state === BRANCH && new_needT)) { s_acquire := false.B w_grantfirst := false.B w_grantlast := false.B w_grant := false.B s_grantack := false.B s_writeback := false.B } // Do we need a probe? when ((!params.firstLevel).B && (new_meta.hit && (new_needT || new_meta.state === TRUNK) && (new_meta.clients & ~new_skipProbe) =/= 0.U)) { s_pprobe := false.B w_pprobeackfirst := false.B w_pprobeacklast := false.B w_pprobeack := false.B s_writeback := false.B } // Do we need a grantack? when (new_request.opcode === AcquireBlock || new_request.opcode === AcquirePerm) { w_grantack := false.B s_writeback := false.B } // Becomes dirty? when (!new_request.opcode(2) && new_meta.hit && !new_meta.dirty) { s_writeback := false.B } } } } File Parameters.scala: /* * Copyright 2019 SiFive, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You should have received a copy of LICENSE.Apache2 along with * this software. If not, you may obtain a copy at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package sifive.blocks.inclusivecache import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.util._ import freechips.rocketchip.util.property.cover import scala.math.{min,max} case class CacheParameters( level: Int, ways: Int, sets: Int, blockBytes: Int, beatBytes: Int, // inner hintsSkipProbe: Boolean) { require (ways > 0) require (sets > 0) require (blockBytes > 0 && isPow2(blockBytes)) require (beatBytes > 0 && isPow2(beatBytes)) require (blockBytes >= beatBytes) val blocks = ways * sets val sizeBytes = blocks * blockBytes val blockBeats = blockBytes/beatBytes } case class InclusiveCachePortParameters( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams) { def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new TLBuffer(a, b, c, d, e)) } object InclusiveCachePortParameters { val none = InclusiveCachePortParameters( a = BufferParams.none, b = BufferParams.none, c = BufferParams.none, d = BufferParams.none, e = BufferParams.none) val full = InclusiveCachePortParameters( a = BufferParams.default, b = BufferParams.default, c = BufferParams.default, d = BufferParams.default, e = BufferParams.default) // This removes feed-through paths from C=>A and A=>C val fullC = InclusiveCachePortParameters( a = BufferParams.none, b = BufferParams.none, c = BufferParams.default, d = BufferParams.none, e = BufferParams.none) val flowAD = InclusiveCachePortParameters( a = BufferParams.flow, b = BufferParams.none, c = BufferParams.none, d = BufferParams.flow, e = BufferParams.none) val flowAE = InclusiveCachePortParameters( a = BufferParams.flow, b = BufferParams.none, c = BufferParams.none, d = BufferParams.none, e = BufferParams.flow) // For innerBuf: // SinkA: no restrictions, flows into scheduler+putbuffer // SourceB: no restrictions, flows out of scheduler // sinkC: no restrictions, flows into scheduler+putbuffer & buffered to bankedStore // SourceD: no restrictions, flows out of bankedStore/regout // SinkE: no restrictions, flows into scheduler // // ... so while none is possible, you probably want at least flowAC to cut ready // from the scheduler delay and flowD to ease SourceD back-pressure // For outerBufer: // SourceA: must not be pipe, flows out of scheduler // SinkB: no restrictions, flows into scheduler // SourceC: pipe is useless, flows out of bankedStore/regout, parameter depth ignored // SinkD: no restrictions, flows into scheduler & bankedStore // SourceE: must not be pipe, flows out of scheduler // // ... 
AE take the channel ready into the scheduler, so you need at least flowAE } case class InclusiveCacheMicroParameters( writeBytes: Int, // backing store update granularity memCycles: Int = 40, // # of L2 clock cycles for a memory round-trip (50ns @ 800MHz) portFactor: Int = 4, // numSubBanks = (widest TL port * portFactor) / writeBytes dirReg: Boolean = false, innerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.fullC, // or none outerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.full) // or flowAE { require (writeBytes > 0 && isPow2(writeBytes)) require (memCycles > 0) require (portFactor >= 2) // for inner RMW and concurrent outer Relase + Grant } case class InclusiveCacheControlParameters( address: BigInt, beatBytes: Int, bankedControl: Boolean) case class InclusiveCacheParameters( cache: CacheParameters, micro: InclusiveCacheMicroParameters, control: Boolean, inner: TLEdgeIn, outer: TLEdgeOut)(implicit val p: Parameters) { require (cache.ways > 1) require (cache.sets > 1 && isPow2(cache.sets)) require (micro.writeBytes <= inner.manager.beatBytes) require (micro.writeBytes <= outer.manager.beatBytes) require (inner.manager.beatBytes <= cache.blockBytes) require (outer.manager.beatBytes <= cache.blockBytes) // Require that all cached address ranges have contiguous blocks outer.manager.managers.flatMap(_.address).foreach { a => require (a.alignment >= cache.blockBytes) } // If we are the first level cache, we do not need to support inner-BCE val firstLevel = !inner.client.clients.exists(_.supports.probe) // If we are the last level cache, we do not need to support outer-B val lastLevel = !outer.manager.managers.exists(_.regionType > RegionType.UNCACHED) require (lastLevel) // Provision enough resources to achieve full throughput with missing single-beat accesses val mshrs = InclusiveCacheParameters.all_mshrs(cache, micro) val secondary = max(mshrs, micro.memCycles - mshrs) val putLists = micro.memCycles // allow every request to be single beat val putBeats = max(2*cache.blockBeats, micro.memCycles) val relLists = 2 val relBeats = relLists*cache.blockBeats val flatAddresses = AddressSet.unify(outer.manager.managers.flatMap(_.address)) val pickMask = AddressDecoder(flatAddresses.map(Seq(_)), flatAddresses.map(_.mask).reduce(_|_)) def bitOffsets(x: BigInt, offset: Int = 0, tail: List[Int] = List.empty[Int]): List[Int] = if (x == 0) tail.reverse else bitOffsets(x >> 1, offset + 1, if ((x & 1) == 1) offset :: tail else tail) val addressMapping = bitOffsets(pickMask) val addressBits = addressMapping.size // println(s"addresses: ${flatAddresses} => ${pickMask} => ${addressBits}") val allClients = inner.client.clients.size val clientBitsRaw = inner.client.clients.filter(_.supports.probe).size val clientBits = max(1, clientBitsRaw) val stateBits = 2 val wayBits = log2Ceil(cache.ways) val setBits = log2Ceil(cache.sets) val offsetBits = log2Ceil(cache.blockBytes) val tagBits = addressBits - setBits - offsetBits val putBits = log2Ceil(max(putLists, relLists)) require (tagBits > 0) require (offsetBits > 0) val innerBeatBits = (offsetBits - log2Ceil(inner.manager.beatBytes)) max 1 val outerBeatBits = (offsetBits - log2Ceil(outer.manager.beatBytes)) max 1 val innerMaskBits = inner.manager.beatBytes / micro.writeBytes val outerMaskBits = outer.manager.beatBytes / micro.writeBytes def clientBit(source: UInt): UInt = { if (clientBitsRaw == 0) { 0.U } else { Cat(inner.client.clients.filter(_.supports.probe).map(_.sourceId.contains(source)).reverse) } } def 
clientSource(bit: UInt): UInt = { if (clientBitsRaw == 0) { 0.U } else { Mux1H(bit, inner.client.clients.filter(_.supports.probe).map(c => c.sourceId.start.U)) } } def parseAddress(x: UInt): (UInt, UInt, UInt) = { val offset = Cat(addressMapping.map(o => x(o,o)).reverse) val set = offset >> offsetBits val tag = set >> setBits (tag(tagBits-1, 0), set(setBits-1, 0), offset(offsetBits-1, 0)) } def widen(x: UInt, width: Int): UInt = { val y = x | 0.U(width.W) assert (y >> width === 0.U) y(width-1, 0) } def expandAddress(tag: UInt, set: UInt, offset: UInt): UInt = { val base = Cat(widen(tag, tagBits), widen(set, setBits), widen(offset, offsetBits)) val bits = Array.fill(outer.bundle.addressBits) { 0.U(1.W) } addressMapping.zipWithIndex.foreach { case (a, i) => bits(a) = base(i,i) } Cat(bits.reverse) } def restoreAddress(expanded: UInt): UInt = { val missingBits = flatAddresses .map { a => (a.widen(pickMask).base, a.widen(~pickMask)) } // key is the bits to restore on match .groupBy(_._1) .view .mapValues(_.map(_._2)) val muxMask = AddressDecoder(missingBits.values.toList) val mux = missingBits.toList.map { case (bits, addrs) => val widen = addrs.map(_.widen(~muxMask)) val matches = AddressSet .unify(widen.distinct) .map(_.contains(expanded)) .reduce(_ || _) (matches, bits.U) } expanded | Mux1H(mux) } def dirReg[T <: Data](x: T, en: Bool = true.B): T = { if (micro.dirReg) RegEnable(x, en) else x } def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) = cover(cond, "CCACHE_L" + cache.level + "_" + label, "MemorySystem;;" + desc) } object MetaData { val stateBits = 2 def INVALID: UInt = 0.U(stateBits.W) // way is empty def BRANCH: UInt = 1.U(stateBits.W) // outer slave cache is trunk def TRUNK: UInt = 2.U(stateBits.W) // unique inner master cache is trunk def TIP: UInt = 3.U(stateBits.W) // we are trunk, inner masters are branch // Does a request need trunk? def needT(opcode: UInt, param: UInt): Bool = { !opcode(2) || (opcode === TLMessages.Hint && param === TLHints.PREFETCH_WRITE) || ((opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm) && param =/= TLPermissions.NtoB) } // Does a request prove the client need not be probed? 
def skipProbeN(opcode: UInt, hintsSkipProbe: Boolean): Bool = { // Acquire(toB) and Get => is N, so no probe // Acquire(*toT) => is N or B, but need T, so no probe // Hint => could be anything, so probe IS needed, if hintsSkipProbe is enabled, skip probe the same client // Put* => is N or B, so probe IS needed opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm || opcode === TLMessages.Get || (opcode === TLMessages.Hint && hintsSkipProbe.B) } def isToN(param: UInt): Bool = { param === TLPermissions.TtoN || param === TLPermissions.BtoN || param === TLPermissions.NtoN } def isToB(param: UInt): Bool = { param === TLPermissions.TtoB || param === TLPermissions.BtoB } } object InclusiveCacheParameters { val lfsrBits = 10 val L2ControlAddress = 0x2010000 val L2ControlSize = 0x1000 def out_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int = { // We need 2-3 normal MSHRs to cover the Directory latency // To fully exploit memory bandwidth-delay-product, we need memCyles/blockBeats MSHRs max(if (micro.dirReg) 3 else 2, (micro.memCycles + cache.blockBeats - 1) / cache.blockBeats) } def all_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int = // We need a dedicated MSHR for B+C each 2 + out_mshrs(cache, micro) } class InclusiveCacheBundle(params: InclusiveCacheParameters) extends Bundle
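The out_mshrs/all_mshrs provisioning above is plain integer arithmetic; a small self-contained check of one hypothetical design point (the numbers are illustrative, not taken from any shipped configuration):

// Mirrors InclusiveCacheParameters.{out_mshrs, all_mshrs} for:
// blockBytes = 64, beatBytes = 8 (so blockBeats = 8), memCycles = 40, dirReg = false.
object MshrCountExample extends App {
  val blockBeats = 64 / 8                                                  // 8 beats per block
  val memCycles  = 40
  val outMshrs   = math.max(2, (memCycles + blockBeats - 1) / blockBeats)  // ceil(40/8) = 5
  val allMshrs   = 2 + outMshrs                                            // + dedicated B and C MSHRs = 7
  println(s"out_mshrs = $outMshrs, all_mshrs = $allMshrs")
}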
module MSHR_22( // @[MSHR.scala:84:7] input clock, // @[MSHR.scala:84:7] input reset, // @[MSHR.scala:84:7] input io_allocate_valid, // @[MSHR.scala:86:14] input io_allocate_bits_prio_1, // @[MSHR.scala:86:14] input io_allocate_bits_prio_2, // @[MSHR.scala:86:14] input io_allocate_bits_control, // @[MSHR.scala:86:14] input [2:0] io_allocate_bits_opcode, // @[MSHR.scala:86:14] input [2:0] io_allocate_bits_param, // @[MSHR.scala:86:14] input [2:0] io_allocate_bits_size, // @[MSHR.scala:86:14] input [5:0] io_allocate_bits_source, // @[MSHR.scala:86:14] input [8:0] io_allocate_bits_tag, // @[MSHR.scala:86:14] input [5:0] io_allocate_bits_offset, // @[MSHR.scala:86:14] input [5:0] io_allocate_bits_put, // @[MSHR.scala:86:14] input [10:0] io_allocate_bits_set, // @[MSHR.scala:86:14] input io_allocate_bits_repeat, // @[MSHR.scala:86:14] input io_directory_valid, // @[MSHR.scala:86:14] input io_directory_bits_dirty, // @[MSHR.scala:86:14] input [1:0] io_directory_bits_state, // @[MSHR.scala:86:14] input io_directory_bits_clients, // @[MSHR.scala:86:14] input [8:0] io_directory_bits_tag, // @[MSHR.scala:86:14] input io_directory_bits_hit, // @[MSHR.scala:86:14] input [3:0] io_directory_bits_way, // @[MSHR.scala:86:14] output io_status_valid, // @[MSHR.scala:86:14] output [10:0] io_status_bits_set, // @[MSHR.scala:86:14] output [8:0] io_status_bits_tag, // @[MSHR.scala:86:14] output [3:0] io_status_bits_way, // @[MSHR.scala:86:14] output io_status_bits_blockB, // @[MSHR.scala:86:14] output io_status_bits_nestB, // @[MSHR.scala:86:14] output io_status_bits_blockC, // @[MSHR.scala:86:14] output io_status_bits_nestC, // @[MSHR.scala:86:14] input io_schedule_ready, // @[MSHR.scala:86:14] output io_schedule_valid, // @[MSHR.scala:86:14] output io_schedule_bits_a_valid, // @[MSHR.scala:86:14] output [8:0] io_schedule_bits_a_bits_tag, // @[MSHR.scala:86:14] output [10:0] io_schedule_bits_a_bits_set, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_a_bits_param, // @[MSHR.scala:86:14] output io_schedule_bits_a_bits_block, // @[MSHR.scala:86:14] output io_schedule_bits_b_valid, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_b_bits_param, // @[MSHR.scala:86:14] output [8:0] io_schedule_bits_b_bits_tag, // @[MSHR.scala:86:14] output [10:0] io_schedule_bits_b_bits_set, // @[MSHR.scala:86:14] output io_schedule_bits_b_bits_clients, // @[MSHR.scala:86:14] output io_schedule_bits_c_valid, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_c_bits_opcode, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_c_bits_param, // @[MSHR.scala:86:14] output [8:0] io_schedule_bits_c_bits_tag, // @[MSHR.scala:86:14] output [10:0] io_schedule_bits_c_bits_set, // @[MSHR.scala:86:14] output [3:0] io_schedule_bits_c_bits_way, // @[MSHR.scala:86:14] output io_schedule_bits_c_bits_dirty, // @[MSHR.scala:86:14] output io_schedule_bits_d_valid, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_prio_1, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_prio_2, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_control, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_d_bits_opcode, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_d_bits_param, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_d_bits_size, // @[MSHR.scala:86:14] output [5:0] io_schedule_bits_d_bits_source, // @[MSHR.scala:86:14] output [8:0] io_schedule_bits_d_bits_tag, // @[MSHR.scala:86:14] output [5:0] io_schedule_bits_d_bits_offset, // @[MSHR.scala:86:14] output [5:0] io_schedule_bits_d_bits_put, // @[MSHR.scala:86:14] output 
[10:0] io_schedule_bits_d_bits_set, // @[MSHR.scala:86:14] output [3:0] io_schedule_bits_d_bits_way, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_bad, // @[MSHR.scala:86:14] output io_schedule_bits_e_valid, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_e_bits_sink, // @[MSHR.scala:86:14] output io_schedule_bits_x_valid, // @[MSHR.scala:86:14] output io_schedule_bits_dir_valid, // @[MSHR.scala:86:14] output [10:0] io_schedule_bits_dir_bits_set, // @[MSHR.scala:86:14] output [3:0] io_schedule_bits_dir_bits_way, // @[MSHR.scala:86:14] output io_schedule_bits_dir_bits_data_dirty, // @[MSHR.scala:86:14] output [1:0] io_schedule_bits_dir_bits_data_state, // @[MSHR.scala:86:14] output io_schedule_bits_dir_bits_data_clients, // @[MSHR.scala:86:14] output [8:0] io_schedule_bits_dir_bits_data_tag, // @[MSHR.scala:86:14] output io_schedule_bits_reload, // @[MSHR.scala:86:14] input io_sinkc_valid, // @[MSHR.scala:86:14] input io_sinkc_bits_last, // @[MSHR.scala:86:14] input [10:0] io_sinkc_bits_set, // @[MSHR.scala:86:14] input [8:0] io_sinkc_bits_tag, // @[MSHR.scala:86:14] input [5:0] io_sinkc_bits_source, // @[MSHR.scala:86:14] input [2:0] io_sinkc_bits_param, // @[MSHR.scala:86:14] input io_sinkc_bits_data, // @[MSHR.scala:86:14] input io_sinkd_valid, // @[MSHR.scala:86:14] input io_sinkd_bits_last, // @[MSHR.scala:86:14] input [2:0] io_sinkd_bits_opcode, // @[MSHR.scala:86:14] input [2:0] io_sinkd_bits_param, // @[MSHR.scala:86:14] input [3:0] io_sinkd_bits_source, // @[MSHR.scala:86:14] input [2:0] io_sinkd_bits_sink, // @[MSHR.scala:86:14] input io_sinkd_bits_denied, // @[MSHR.scala:86:14] input io_sinke_valid, // @[MSHR.scala:86:14] input [3:0] io_sinke_bits_sink, // @[MSHR.scala:86:14] input [10:0] io_nestedwb_set, // @[MSHR.scala:86:14] input [8:0] io_nestedwb_tag, // @[MSHR.scala:86:14] input io_nestedwb_b_toN, // @[MSHR.scala:86:14] input io_nestedwb_b_toB, // @[MSHR.scala:86:14] input io_nestedwb_b_clr_dirty, // @[MSHR.scala:86:14] input io_nestedwb_c_set_dirty // @[MSHR.scala:86:14] ); wire [8:0] final_meta_writeback_tag; // @[MSHR.scala:215:38] wire final_meta_writeback_clients; // @[MSHR.scala:215:38] wire [1:0] final_meta_writeback_state; // @[MSHR.scala:215:38] wire final_meta_writeback_dirty; // @[MSHR.scala:215:38] wire io_allocate_valid_0 = io_allocate_valid; // @[MSHR.scala:84:7] wire io_allocate_bits_prio_1_0 = io_allocate_bits_prio_1; // @[MSHR.scala:84:7] wire io_allocate_bits_prio_2_0 = io_allocate_bits_prio_2; // @[MSHR.scala:84:7] wire io_allocate_bits_control_0 = io_allocate_bits_control; // @[MSHR.scala:84:7] wire [2:0] io_allocate_bits_opcode_0 = io_allocate_bits_opcode; // @[MSHR.scala:84:7] wire [2:0] io_allocate_bits_param_0 = io_allocate_bits_param; // @[MSHR.scala:84:7] wire [2:0] io_allocate_bits_size_0 = io_allocate_bits_size; // @[MSHR.scala:84:7] wire [5:0] io_allocate_bits_source_0 = io_allocate_bits_source; // @[MSHR.scala:84:7] wire [8:0] io_allocate_bits_tag_0 = io_allocate_bits_tag; // @[MSHR.scala:84:7] wire [5:0] io_allocate_bits_offset_0 = io_allocate_bits_offset; // @[MSHR.scala:84:7] wire [5:0] io_allocate_bits_put_0 = io_allocate_bits_put; // @[MSHR.scala:84:7] wire [10:0] io_allocate_bits_set_0 = io_allocate_bits_set; // @[MSHR.scala:84:7] wire io_allocate_bits_repeat_0 = io_allocate_bits_repeat; // @[MSHR.scala:84:7] wire io_directory_valid_0 = io_directory_valid; // @[MSHR.scala:84:7] wire io_directory_bits_dirty_0 = io_directory_bits_dirty; // @[MSHR.scala:84:7] wire [1:0] io_directory_bits_state_0 = io_directory_bits_state; // 
@[MSHR.scala:84:7] wire io_directory_bits_clients_0 = io_directory_bits_clients; // @[MSHR.scala:84:7] wire [8:0] io_directory_bits_tag_0 = io_directory_bits_tag; // @[MSHR.scala:84:7] wire io_directory_bits_hit_0 = io_directory_bits_hit; // @[MSHR.scala:84:7] wire [3:0] io_directory_bits_way_0 = io_directory_bits_way; // @[MSHR.scala:84:7] wire io_schedule_ready_0 = io_schedule_ready; // @[MSHR.scala:84:7] wire io_sinkc_valid_0 = io_sinkc_valid; // @[MSHR.scala:84:7] wire io_sinkc_bits_last_0 = io_sinkc_bits_last; // @[MSHR.scala:84:7] wire [10:0] io_sinkc_bits_set_0 = io_sinkc_bits_set; // @[MSHR.scala:84:7] wire [8:0] io_sinkc_bits_tag_0 = io_sinkc_bits_tag; // @[MSHR.scala:84:7] wire [5:0] io_sinkc_bits_source_0 = io_sinkc_bits_source; // @[MSHR.scala:84:7] wire [2:0] io_sinkc_bits_param_0 = io_sinkc_bits_param; // @[MSHR.scala:84:7] wire io_sinkc_bits_data_0 = io_sinkc_bits_data; // @[MSHR.scala:84:7] wire io_sinkd_valid_0 = io_sinkd_valid; // @[MSHR.scala:84:7] wire io_sinkd_bits_last_0 = io_sinkd_bits_last; // @[MSHR.scala:84:7] wire [2:0] io_sinkd_bits_opcode_0 = io_sinkd_bits_opcode; // @[MSHR.scala:84:7] wire [2:0] io_sinkd_bits_param_0 = io_sinkd_bits_param; // @[MSHR.scala:84:7] wire [3:0] io_sinkd_bits_source_0 = io_sinkd_bits_source; // @[MSHR.scala:84:7] wire [2:0] io_sinkd_bits_sink_0 = io_sinkd_bits_sink; // @[MSHR.scala:84:7] wire io_sinkd_bits_denied_0 = io_sinkd_bits_denied; // @[MSHR.scala:84:7] wire io_sinke_valid_0 = io_sinke_valid; // @[MSHR.scala:84:7] wire [3:0] io_sinke_bits_sink_0 = io_sinke_bits_sink; // @[MSHR.scala:84:7] wire [10:0] io_nestedwb_set_0 = io_nestedwb_set; // @[MSHR.scala:84:7] wire [8:0] io_nestedwb_tag_0 = io_nestedwb_tag; // @[MSHR.scala:84:7] wire io_nestedwb_b_toN_0 = io_nestedwb_b_toN; // @[MSHR.scala:84:7] wire io_nestedwb_b_toB_0 = io_nestedwb_b_toB; // @[MSHR.scala:84:7] wire io_nestedwb_b_clr_dirty_0 = io_nestedwb_b_clr_dirty; // @[MSHR.scala:84:7] wire io_nestedwb_c_set_dirty_0 = io_nestedwb_c_set_dirty; // @[MSHR.scala:84:7] wire io_allocate_bits_prio_0 = 1'h0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_prio_0 = 1'h0; // @[MSHR.scala:84:7] wire io_schedule_bits_x_bits_fail = 1'h0; // @[MSHR.scala:84:7] wire _io_schedule_bits_c_valid_T_2 = 1'h0; // @[MSHR.scala:186:68] wire _io_schedule_bits_c_valid_T_3 = 1'h0; // @[MSHR.scala:186:80] wire invalid_dirty = 1'h0; // @[MSHR.scala:268:21] wire invalid_clients = 1'h0; // @[MSHR.scala:268:21] wire _excluded_client_T = 1'h0; // @[MSHR.scala:279:38] wire _excluded_client_T_7 = 1'h0; // @[Parameters.scala:279:137] wire _excluded_client_T_9 = 1'h0; // @[MSHR.scala:279:57] wire excluded_client = 1'h0; // @[MSHR.scala:279:28] wire _after_T_4 = 1'h0; // @[MSHR.scala:323:11] wire allocate_as_full_prio_0 = 1'h0; // @[MSHR.scala:504:34] wire new_request_prio_0 = 1'h0; // @[MSHR.scala:506:24] wire _new_skipProbe_T_6 = 1'h0; // @[Parameters.scala:279:137] wire _prior_T_4 = 1'h0; // @[MSHR.scala:323:11] wire _io_schedule_bits_b_bits_clients_T = 1'h1; // @[MSHR.scala:289:53] wire _last_probe_T_1 = 1'h1; // @[MSHR.scala:459:66] wire [3:0] io_schedule_bits_a_bits_source = 4'h0; // @[MSHR.scala:84:7] wire [3:0] io_schedule_bits_c_bits_source = 4'h0; // @[MSHR.scala:84:7] wire [3:0] io_schedule_bits_d_bits_sink = 4'h0; // @[MSHR.scala:84:7] wire [8:0] invalid_tag = 9'h0; // @[MSHR.scala:268:21] wire [1:0] invalid_state = 2'h0; // @[MSHR.scala:268:21] wire [1:0] _final_meta_writeback_state_T_11 = 2'h1; // @[MSHR.scala:240:70] wire allocate_as_full_prio_1 = io_allocate_bits_prio_1_0; // 
@[MSHR.scala:84:7, :504:34] wire allocate_as_full_prio_2 = io_allocate_bits_prio_2_0; // @[MSHR.scala:84:7, :504:34] wire allocate_as_full_control = io_allocate_bits_control_0; // @[MSHR.scala:84:7, :504:34] wire [2:0] allocate_as_full_opcode = io_allocate_bits_opcode_0; // @[MSHR.scala:84:7, :504:34] wire [2:0] allocate_as_full_param = io_allocate_bits_param_0; // @[MSHR.scala:84:7, :504:34] wire [2:0] allocate_as_full_size = io_allocate_bits_size_0; // @[MSHR.scala:84:7, :504:34] wire [5:0] allocate_as_full_source = io_allocate_bits_source_0; // @[MSHR.scala:84:7, :504:34] wire [8:0] allocate_as_full_tag = io_allocate_bits_tag_0; // @[MSHR.scala:84:7, :504:34] wire [5:0] allocate_as_full_offset = io_allocate_bits_offset_0; // @[MSHR.scala:84:7, :504:34] wire [5:0] allocate_as_full_put = io_allocate_bits_put_0; // @[MSHR.scala:84:7, :504:34] wire [10:0] allocate_as_full_set = io_allocate_bits_set_0; // @[MSHR.scala:84:7, :504:34] wire _io_status_bits_blockB_T_8; // @[MSHR.scala:168:40] wire _io_status_bits_nestB_T_4; // @[MSHR.scala:169:93] wire _io_status_bits_blockC_T; // @[MSHR.scala:172:28] wire _io_status_bits_nestC_T_5; // @[MSHR.scala:173:39] wire _io_schedule_valid_T_5; // @[MSHR.scala:193:105] wire _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:184:55] wire _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:283:91] wire _io_schedule_bits_b_valid_T_2; // @[MSHR.scala:185:41] wire [2:0] _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:286:41] wire [8:0] _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:287:41] wire _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:289:51] wire _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:186:64] wire [2:0] _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:290:41] wire [2:0] _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:291:41] wire _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:187:57] wire [2:0] _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:298:41] wire _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:188:43] wire _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:189:40] wire _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:190:66] wire _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:310:41] wire [1:0] _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:310:41] wire _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:310:41] wire [8:0] _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:310:41] wire no_wait; // @[MSHR.scala:183:83] wire [10:0] io_status_bits_set_0; // @[MSHR.scala:84:7] wire [8:0] io_status_bits_tag_0; // @[MSHR.scala:84:7] wire [3:0] io_status_bits_way_0; // @[MSHR.scala:84:7] wire io_status_bits_blockB_0; // @[MSHR.scala:84:7] wire io_status_bits_nestB_0; // @[MSHR.scala:84:7] wire io_status_bits_blockC_0; // @[MSHR.scala:84:7] wire io_status_bits_nestC_0; // @[MSHR.scala:84:7] wire io_status_valid_0; // @[MSHR.scala:84:7] wire [8:0] io_schedule_bits_a_bits_tag_0; // @[MSHR.scala:84:7] wire [10:0] io_schedule_bits_a_bits_set_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_a_bits_param_0; // @[MSHR.scala:84:7] wire io_schedule_bits_a_bits_block_0; // @[MSHR.scala:84:7] wire io_schedule_bits_a_valid_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_b_bits_param_0; // @[MSHR.scala:84:7] wire [8:0] io_schedule_bits_b_bits_tag_0; // @[MSHR.scala:84:7] wire [10:0] io_schedule_bits_b_bits_set_0; // @[MSHR.scala:84:7] wire io_schedule_bits_b_bits_clients_0; // @[MSHR.scala:84:7] wire io_schedule_bits_b_valid_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_c_bits_opcode_0; // 
@[MSHR.scala:84:7] wire [2:0] io_schedule_bits_c_bits_param_0; // @[MSHR.scala:84:7] wire [8:0] io_schedule_bits_c_bits_tag_0; // @[MSHR.scala:84:7] wire [10:0] io_schedule_bits_c_bits_set_0; // @[MSHR.scala:84:7] wire [3:0] io_schedule_bits_c_bits_way_0; // @[MSHR.scala:84:7] wire io_schedule_bits_c_bits_dirty_0; // @[MSHR.scala:84:7] wire io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_prio_1_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_prio_2_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_control_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_d_bits_opcode_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_d_bits_param_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_d_bits_size_0; // @[MSHR.scala:84:7] wire [5:0] io_schedule_bits_d_bits_source_0; // @[MSHR.scala:84:7] wire [8:0] io_schedule_bits_d_bits_tag_0; // @[MSHR.scala:84:7] wire [5:0] io_schedule_bits_d_bits_offset_0; // @[MSHR.scala:84:7] wire [5:0] io_schedule_bits_d_bits_put_0; // @[MSHR.scala:84:7] wire [10:0] io_schedule_bits_d_bits_set_0; // @[MSHR.scala:84:7] wire [3:0] io_schedule_bits_d_bits_way_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_bad_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_e_bits_sink_0; // @[MSHR.scala:84:7] wire io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7] wire io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7] wire io_schedule_bits_dir_bits_data_dirty_0; // @[MSHR.scala:84:7] wire [1:0] io_schedule_bits_dir_bits_data_state_0; // @[MSHR.scala:84:7] wire io_schedule_bits_dir_bits_data_clients_0; // @[MSHR.scala:84:7] wire [8:0] io_schedule_bits_dir_bits_data_tag_0; // @[MSHR.scala:84:7] wire [10:0] io_schedule_bits_dir_bits_set_0; // @[MSHR.scala:84:7] wire [3:0] io_schedule_bits_dir_bits_way_0; // @[MSHR.scala:84:7] wire io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7] wire io_schedule_bits_reload_0; // @[MSHR.scala:84:7] wire io_schedule_valid_0; // @[MSHR.scala:84:7] reg request_valid; // @[MSHR.scala:97:30] assign io_status_valid_0 = request_valid; // @[MSHR.scala:84:7, :97:30] reg request_prio_1; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_prio_1_0 = request_prio_1; // @[MSHR.scala:84:7, :98:20] reg request_prio_2; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_prio_2_0 = request_prio_2; // @[MSHR.scala:84:7, :98:20] reg request_control; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_control_0 = request_control; // @[MSHR.scala:84:7, :98:20] reg [2:0] request_opcode; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_opcode_0 = request_opcode; // @[MSHR.scala:84:7, :98:20] reg [2:0] request_param; // @[MSHR.scala:98:20] reg [2:0] request_size; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_size_0 = request_size; // @[MSHR.scala:84:7, :98:20] reg [5:0] request_source; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_source_0 = request_source; // @[MSHR.scala:84:7, :98:20] reg [8:0] request_tag; // @[MSHR.scala:98:20] assign io_status_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_a_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_d_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20] reg [5:0] request_offset; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_offset_0 = request_offset; // @[MSHR.scala:84:7, :98:20] reg [5:0] request_put; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_put_0 = request_put; // @[MSHR.scala:84:7, :98:20] reg [10:0] 
request_set; // @[MSHR.scala:98:20] assign io_status_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_a_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_b_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_c_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_d_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_dir_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] reg meta_valid; // @[MSHR.scala:99:27] reg meta_dirty; // @[MSHR.scala:100:17] assign io_schedule_bits_c_bits_dirty_0 = meta_dirty; // @[MSHR.scala:84:7, :100:17] reg [1:0] meta_state; // @[MSHR.scala:100:17] reg meta_clients; // @[MSHR.scala:100:17] wire _meta_no_clients_T = meta_clients; // @[MSHR.scala:100:17, :220:39] assign _io_schedule_bits_b_bits_clients_T_1 = meta_clients; // @[MSHR.scala:100:17, :289:51] wire evict_c = meta_clients; // @[MSHR.scala:100:17, :315:27] wire before_c = meta_clients; // @[MSHR.scala:100:17, :315:27] wire _last_probe_T_2 = meta_clients; // @[MSHR.scala:100:17, :459:64] reg [8:0] meta_tag; // @[MSHR.scala:100:17] assign io_schedule_bits_c_bits_tag_0 = meta_tag; // @[MSHR.scala:84:7, :100:17] reg meta_hit; // @[MSHR.scala:100:17] reg [3:0] meta_way; // @[MSHR.scala:100:17] assign io_status_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17] assign io_schedule_bits_c_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17] assign io_schedule_bits_d_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17] assign io_schedule_bits_dir_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17] wire [3:0] final_meta_writeback_way = meta_way; // @[MSHR.scala:100:17, :215:38] reg s_rprobe; // @[MSHR.scala:121:33] reg w_rprobeackfirst; // @[MSHR.scala:122:33] reg w_rprobeacklast; // @[MSHR.scala:123:33] reg s_release; // @[MSHR.scala:124:33] reg w_releaseack; // @[MSHR.scala:125:33] reg s_pprobe; // @[MSHR.scala:126:33] reg s_acquire; // @[MSHR.scala:127:33] reg s_flush; // @[MSHR.scala:128:33] reg w_grantfirst; // @[MSHR.scala:129:33] reg w_grantlast; // @[MSHR.scala:130:33] reg w_grant; // @[MSHR.scala:131:33] reg w_pprobeackfirst; // @[MSHR.scala:132:33] reg w_pprobeacklast; // @[MSHR.scala:133:33] reg w_pprobeack; // @[MSHR.scala:134:33] reg s_grantack; // @[MSHR.scala:136:33] reg s_execute; // @[MSHR.scala:137:33] reg w_grantack; // @[MSHR.scala:138:33] reg s_writeback; // @[MSHR.scala:139:33] reg [2:0] sink; // @[MSHR.scala:147:17] assign io_schedule_bits_e_bits_sink_0 = sink; // @[MSHR.scala:84:7, :147:17] reg gotT; // @[MSHR.scala:148:17] reg bad_grant; // @[MSHR.scala:149:22] assign io_schedule_bits_d_bits_bad_0 = bad_grant; // @[MSHR.scala:84:7, :149:22] reg probes_done; // @[MSHR.scala:150:24] reg probes_toN; // @[MSHR.scala:151:23] reg probes_noT; // @[MSHR.scala:152:23] wire _io_status_bits_blockB_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28] wire _io_status_bits_blockB_T_1 = ~w_releaseack; // @[MSHR.scala:125:33, :168:45] wire _io_status_bits_blockB_T_2 = ~w_rprobeacklast; // @[MSHR.scala:123:33, :168:62] wire _io_status_bits_blockB_T_3 = _io_status_bits_blockB_T_1 | _io_status_bits_blockB_T_2; // @[MSHR.scala:168:{45,59,62}] wire _io_status_bits_blockB_T_4 = ~w_pprobeacklast; // @[MSHR.scala:133:33, :168:82] wire _io_status_bits_blockB_T_5 = _io_status_bits_blockB_T_3 | _io_status_bits_blockB_T_4; // @[MSHR.scala:168:{59,79,82}] wire _io_status_bits_blockB_T_6 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103] wire _io_status_bits_blockB_T_7 = 
_io_status_bits_blockB_T_5 & _io_status_bits_blockB_T_6; // @[MSHR.scala:168:{79,100,103}] assign _io_status_bits_blockB_T_8 = _io_status_bits_blockB_T | _io_status_bits_blockB_T_7; // @[MSHR.scala:168:{28,40,100}] assign io_status_bits_blockB_0 = _io_status_bits_blockB_T_8; // @[MSHR.scala:84:7, :168:40] wire _io_status_bits_nestB_T = meta_valid & w_releaseack; // @[MSHR.scala:99:27, :125:33, :169:39] wire _io_status_bits_nestB_T_1 = _io_status_bits_nestB_T & w_rprobeacklast; // @[MSHR.scala:123:33, :169:{39,55}] wire _io_status_bits_nestB_T_2 = _io_status_bits_nestB_T_1 & w_pprobeacklast; // @[MSHR.scala:133:33, :169:{55,74}] wire _io_status_bits_nestB_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :169:96] assign _io_status_bits_nestB_T_4 = _io_status_bits_nestB_T_2 & _io_status_bits_nestB_T_3; // @[MSHR.scala:169:{74,93,96}] assign io_status_bits_nestB_0 = _io_status_bits_nestB_T_4; // @[MSHR.scala:84:7, :169:93] assign _io_status_bits_blockC_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28, :172:28] assign io_status_bits_blockC_0 = _io_status_bits_blockC_T; // @[MSHR.scala:84:7, :172:28] wire _io_status_bits_nestC_T = ~w_rprobeackfirst; // @[MSHR.scala:122:33, :173:43] wire _io_status_bits_nestC_T_1 = ~w_pprobeackfirst; // @[MSHR.scala:132:33, :173:64] wire _io_status_bits_nestC_T_2 = _io_status_bits_nestC_T | _io_status_bits_nestC_T_1; // @[MSHR.scala:173:{43,61,64}] wire _io_status_bits_nestC_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :173:85] wire _io_status_bits_nestC_T_4 = _io_status_bits_nestC_T_2 | _io_status_bits_nestC_T_3; // @[MSHR.scala:173:{61,82,85}] assign _io_status_bits_nestC_T_5 = meta_valid & _io_status_bits_nestC_T_4; // @[MSHR.scala:99:27, :173:{39,82}] assign io_status_bits_nestC_0 = _io_status_bits_nestC_T_5; // @[MSHR.scala:84:7, :173:39] wire _no_wait_T = w_rprobeacklast & w_releaseack; // @[MSHR.scala:123:33, :125:33, :183:33] wire _no_wait_T_1 = _no_wait_T & w_grantlast; // @[MSHR.scala:130:33, :183:{33,49}] wire _no_wait_T_2 = _no_wait_T_1 & w_pprobeacklast; // @[MSHR.scala:133:33, :183:{49,64}] assign no_wait = _no_wait_T_2 & w_grantack; // @[MSHR.scala:138:33, :183:{64,83}] assign io_schedule_bits_reload_0 = no_wait; // @[MSHR.scala:84:7, :183:83] wire _io_schedule_bits_a_valid_T = ~s_acquire; // @[MSHR.scala:127:33, :184:31] wire _io_schedule_bits_a_valid_T_1 = _io_schedule_bits_a_valid_T & s_release; // @[MSHR.scala:124:33, :184:{31,42}] assign _io_schedule_bits_a_valid_T_2 = _io_schedule_bits_a_valid_T_1 & s_pprobe; // @[MSHR.scala:126:33, :184:{42,55}] assign io_schedule_bits_a_valid_0 = _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:84:7, :184:55] wire _io_schedule_bits_b_valid_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31] wire _io_schedule_bits_b_valid_T_1 = ~s_pprobe; // @[MSHR.scala:126:33, :185:44] assign _io_schedule_bits_b_valid_T_2 = _io_schedule_bits_b_valid_T | _io_schedule_bits_b_valid_T_1; // @[MSHR.scala:185:{31,41,44}] assign io_schedule_bits_b_valid_0 = _io_schedule_bits_b_valid_T_2; // @[MSHR.scala:84:7, :185:41] wire _io_schedule_bits_c_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32] wire _io_schedule_bits_c_valid_T_1 = _io_schedule_bits_c_valid_T & w_rprobeackfirst; // @[MSHR.scala:122:33, :186:{32,43}] assign _io_schedule_bits_c_valid_T_4 = _io_schedule_bits_c_valid_T_1; // @[MSHR.scala:186:{43,64}] assign io_schedule_bits_c_valid_0 = _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:84:7, :186:64] wire _io_schedule_bits_d_valid_T = ~s_execute; // @[MSHR.scala:137:33, :187:31] wire 
_io_schedule_bits_d_valid_T_1 = _io_schedule_bits_d_valid_T & w_pprobeack; // @[MSHR.scala:134:33, :187:{31,42}] assign _io_schedule_bits_d_valid_T_2 = _io_schedule_bits_d_valid_T_1 & w_grant; // @[MSHR.scala:131:33, :187:{42,57}] assign io_schedule_bits_d_valid_0 = _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:84:7, :187:57] wire _io_schedule_bits_e_valid_T = ~s_grantack; // @[MSHR.scala:136:33, :188:31] assign _io_schedule_bits_e_valid_T_1 = _io_schedule_bits_e_valid_T & w_grantfirst; // @[MSHR.scala:129:33, :188:{31,43}] assign io_schedule_bits_e_valid_0 = _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:84:7, :188:43] wire _io_schedule_bits_x_valid_T = ~s_flush; // @[MSHR.scala:128:33, :189:31] assign _io_schedule_bits_x_valid_T_1 = _io_schedule_bits_x_valid_T & w_releaseack; // @[MSHR.scala:125:33, :189:{31,40}] assign io_schedule_bits_x_valid_0 = _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:84:7, :189:40] wire _io_schedule_bits_dir_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :190:34] wire _io_schedule_bits_dir_valid_T_1 = _io_schedule_bits_dir_valid_T & w_rprobeackfirst; // @[MSHR.scala:122:33, :190:{34,45}] wire _io_schedule_bits_dir_valid_T_2 = ~s_writeback; // @[MSHR.scala:139:33, :190:70] wire _io_schedule_bits_dir_valid_T_3 = _io_schedule_bits_dir_valid_T_2 & no_wait; // @[MSHR.scala:183:83, :190:{70,83}] assign _io_schedule_bits_dir_valid_T_4 = _io_schedule_bits_dir_valid_T_1 | _io_schedule_bits_dir_valid_T_3; // @[MSHR.scala:190:{45,66,83}] assign io_schedule_bits_dir_valid_0 = _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:84:7, :190:66] wire _io_schedule_valid_T = io_schedule_bits_a_valid_0 | io_schedule_bits_b_valid_0; // @[MSHR.scala:84:7, :192:49] wire _io_schedule_valid_T_1 = _io_schedule_valid_T | io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7, :192:{49,77}] wire _io_schedule_valid_T_2 = _io_schedule_valid_T_1 | io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7, :192:{77,105}] wire _io_schedule_valid_T_3 = _io_schedule_valid_T_2 | io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7, :192:105, :193:49] wire _io_schedule_valid_T_4 = _io_schedule_valid_T_3 | io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7, :193:{49,77}] assign _io_schedule_valid_T_5 = _io_schedule_valid_T_4 | io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7, :193:{77,105}] assign io_schedule_valid_0 = _io_schedule_valid_T_5; // @[MSHR.scala:84:7, :193:105] wire _io_schedule_bits_dir_bits_data_WIRE_dirty = final_meta_writeback_dirty; // @[MSHR.scala:215:38, :310:71] wire [1:0] _io_schedule_bits_dir_bits_data_WIRE_state = final_meta_writeback_state; // @[MSHR.scala:215:38, :310:71] wire _io_schedule_bits_dir_bits_data_WIRE_clients = final_meta_writeback_clients; // @[MSHR.scala:215:38, :310:71] wire after_c = final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27] wire prior_c = final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27] wire [8:0] _io_schedule_bits_dir_bits_data_WIRE_tag = final_meta_writeback_tag; // @[MSHR.scala:215:38, :310:71] wire final_meta_writeback_hit; // @[MSHR.scala:215:38] wire req_clientBit = request_source == 6'h28; // @[Parameters.scala:46:9] wire _req_needT_T = request_opcode[2]; // @[Parameters.scala:269:12] wire _final_meta_writeback_dirty_T_3 = request_opcode[2]; // @[Parameters.scala:269:12] wire _req_needT_T_1 = ~_req_needT_T; // @[Parameters.scala:269:{5,12}] wire _GEN = request_opcode == 3'h5; // @[Parameters.scala:270:13] wire _req_needT_T_2; // @[Parameters.scala:270:13] assign _req_needT_T_2 = _GEN; // @[Parameters.scala:270:13] wire 
_excluded_client_T_6; // @[Parameters.scala:279:117] assign _excluded_client_T_6 = _GEN; // @[Parameters.scala:270:13, :279:117] wire _GEN_0 = request_param == 3'h1; // @[Parameters.scala:270:42] wire _req_needT_T_3; // @[Parameters.scala:270:42] assign _req_needT_T_3 = _GEN_0; // @[Parameters.scala:270:42] wire _final_meta_writeback_clients_T; // @[Parameters.scala:282:11] assign _final_meta_writeback_clients_T = _GEN_0; // @[Parameters.scala:270:42, :282:11] wire _io_schedule_bits_d_bits_param_T_7; // @[MSHR.scala:299:79] assign _io_schedule_bits_d_bits_param_T_7 = _GEN_0; // @[Parameters.scala:270:42] wire _req_needT_T_4 = _req_needT_T_2 & _req_needT_T_3; // @[Parameters.scala:270:{13,33,42}] wire _req_needT_T_5 = _req_needT_T_1 | _req_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33] wire _GEN_1 = request_opcode == 3'h6; // @[Parameters.scala:271:14] wire _req_needT_T_6; // @[Parameters.scala:271:14] assign _req_needT_T_6 = _GEN_1; // @[Parameters.scala:271:14] wire _req_acquire_T; // @[MSHR.scala:219:36] assign _req_acquire_T = _GEN_1; // @[Parameters.scala:271:14] wire _excluded_client_T_1; // @[Parameters.scala:279:12] assign _excluded_client_T_1 = _GEN_1; // @[Parameters.scala:271:14, :279:12] wire _req_needT_T_7 = &request_opcode; // @[Parameters.scala:271:52] wire _req_needT_T_8 = _req_needT_T_6 | _req_needT_T_7; // @[Parameters.scala:271:{14,42,52}] wire _req_needT_T_9 = |request_param; // @[Parameters.scala:271:89] wire _req_needT_T_10 = _req_needT_T_8 & _req_needT_T_9; // @[Parameters.scala:271:{42,80,89}] wire req_needT = _req_needT_T_5 | _req_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80] wire _req_acquire_T_1 = &request_opcode; // @[Parameters.scala:271:52] wire req_acquire = _req_acquire_T | _req_acquire_T_1; // @[MSHR.scala:219:{36,53,71}] wire meta_no_clients = ~_meta_no_clients_T; // @[MSHR.scala:220:{25,39}] wire _req_promoteT_T = &meta_state; // @[MSHR.scala:100:17, :221:81] wire _req_promoteT_T_1 = meta_no_clients & _req_promoteT_T; // @[MSHR.scala:220:25, :221:{67,81}] wire _req_promoteT_T_2 = meta_hit ? _req_promoteT_T_1 : gotT; // @[MSHR.scala:100:17, :148:17, :221:{40,67}] wire req_promoteT = req_acquire & _req_promoteT_T_2; // @[MSHR.scala:219:53, :221:{34,40}] wire _final_meta_writeback_dirty_T = request_opcode[0]; // @[MSHR.scala:98:20, :224:65] wire _final_meta_writeback_dirty_T_1 = meta_dirty | _final_meta_writeback_dirty_T; // @[MSHR.scala:100:17, :224:{48,65}] wire _final_meta_writeback_state_T = request_param != 3'h3; // @[MSHR.scala:98:20, :225:55] wire _GEN_2 = meta_state == 2'h2; // @[MSHR.scala:100:17, :225:78] wire _final_meta_writeback_state_T_1; // @[MSHR.scala:225:78] assign _final_meta_writeback_state_T_1 = _GEN_2; // @[MSHR.scala:225:78] wire _final_meta_writeback_state_T_12; // @[MSHR.scala:240:70] assign _final_meta_writeback_state_T_12 = _GEN_2; // @[MSHR.scala:225:78, :240:70] wire _evict_T_2; // @[MSHR.scala:317:26] assign _evict_T_2 = _GEN_2; // @[MSHR.scala:225:78, :317:26] wire _before_T_1; // @[MSHR.scala:317:26] assign _before_T_1 = _GEN_2; // @[MSHR.scala:225:78, :317:26] wire _final_meta_writeback_state_T_2 = _final_meta_writeback_state_T & _final_meta_writeback_state_T_1; // @[MSHR.scala:225:{55,64,78}] wire [1:0] _final_meta_writeback_state_T_3 = _final_meta_writeback_state_T_2 ? 
2'h3 : meta_state; // @[MSHR.scala:100:17, :225:{40,64}] wire _GEN_3 = request_param == 3'h2; // @[Parameters.scala:282:43] wire _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:43] assign _final_meta_writeback_clients_T_1 = _GEN_3; // @[Parameters.scala:282:43] wire _io_schedule_bits_d_bits_param_T_5; // @[MSHR.scala:299:79] assign _io_schedule_bits_d_bits_param_T_5 = _GEN_3; // @[Parameters.scala:282:43] wire _final_meta_writeback_clients_T_2 = _final_meta_writeback_clients_T | _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:{11,34,43}] wire _final_meta_writeback_clients_T_3 = request_param == 3'h5; // @[Parameters.scala:282:75] wire _final_meta_writeback_clients_T_4 = _final_meta_writeback_clients_T_2 | _final_meta_writeback_clients_T_3; // @[Parameters.scala:282:{34,66,75}] wire _final_meta_writeback_clients_T_5 = _final_meta_writeback_clients_T_4 & req_clientBit; // @[Parameters.scala:46:9] wire _final_meta_writeback_clients_T_6 = ~_final_meta_writeback_clients_T_5; // @[MSHR.scala:226:{52,56}] wire _final_meta_writeback_clients_T_7 = meta_clients & _final_meta_writeback_clients_T_6; // @[MSHR.scala:100:17, :226:{50,52}] wire _final_meta_writeback_clients_T_8 = ~probes_toN; // @[MSHR.scala:151:23, :232:54] wire _final_meta_writeback_clients_T_9 = meta_clients & _final_meta_writeback_clients_T_8; // @[MSHR.scala:100:17, :232:{52,54}] wire _final_meta_writeback_dirty_T_2 = meta_hit & meta_dirty; // @[MSHR.scala:100:17, :236:45] wire _final_meta_writeback_dirty_T_4 = ~_final_meta_writeback_dirty_T_3; // @[MSHR.scala:236:{63,78}] wire _final_meta_writeback_dirty_T_5 = _final_meta_writeback_dirty_T_2 | _final_meta_writeback_dirty_T_4; // @[MSHR.scala:236:{45,60,63}] wire [1:0] _GEN_4 = {1'h1, ~req_acquire}; // @[MSHR.scala:219:53, :238:40] wire [1:0] _final_meta_writeback_state_T_4; // @[MSHR.scala:238:40] assign _final_meta_writeback_state_T_4 = _GEN_4; // @[MSHR.scala:238:40] wire [1:0] _final_meta_writeback_state_T_6; // @[MSHR.scala:239:65] assign _final_meta_writeback_state_T_6 = _GEN_4; // @[MSHR.scala:238:40, :239:65] wire _final_meta_writeback_state_T_5 = ~meta_hit; // @[MSHR.scala:100:17, :239:41] wire [1:0] _final_meta_writeback_state_T_7 = gotT ? _final_meta_writeback_state_T_6 : 2'h1; // @[MSHR.scala:148:17, :239:{55,65}] wire _final_meta_writeback_state_T_8 = meta_no_clients & req_acquire; // @[MSHR.scala:219:53, :220:25, :244:72] wire [1:0] _final_meta_writeback_state_T_9 = {1'h1, ~_final_meta_writeback_state_T_8}; // @[MSHR.scala:244:{55,72}] wire _GEN_5 = meta_state == 2'h1; // @[MSHR.scala:100:17, :240:70] wire _final_meta_writeback_state_T_10; // @[MSHR.scala:240:70] assign _final_meta_writeback_state_T_10 = _GEN_5; // @[MSHR.scala:240:70] wire _io_schedule_bits_c_bits_param_T; // @[MSHR.scala:291:53] assign _io_schedule_bits_c_bits_param_T = _GEN_5; // @[MSHR.scala:240:70, :291:53] wire _evict_T_1; // @[MSHR.scala:317:26] assign _evict_T_1 = _GEN_5; // @[MSHR.scala:240:70, :317:26] wire _before_T; // @[MSHR.scala:317:26] assign _before_T = _GEN_5; // @[MSHR.scala:240:70, :317:26] wire [1:0] _final_meta_writeback_state_T_13 = {_final_meta_writeback_state_T_12, 1'h1}; // @[MSHR.scala:240:70] wire _final_meta_writeback_state_T_14 = &meta_state; // @[MSHR.scala:100:17, :221:81, :240:70] wire [1:0] _final_meta_writeback_state_T_15 = _final_meta_writeback_state_T_14 ? _final_meta_writeback_state_T_9 : _final_meta_writeback_state_T_13; // @[MSHR.scala:240:70, :244:55] wire [1:0] _final_meta_writeback_state_T_16 = _final_meta_writeback_state_T_5 ? 
_final_meta_writeback_state_T_7 : _final_meta_writeback_state_T_15; // @[MSHR.scala:239:{40,41,55}, :240:70] wire [1:0] _final_meta_writeback_state_T_17 = req_needT ? _final_meta_writeback_state_T_4 : _final_meta_writeback_state_T_16; // @[Parameters.scala:270:70] wire _final_meta_writeback_clients_T_10 = ~probes_toN; // @[MSHR.scala:151:23, :232:54, :245:66] wire _final_meta_writeback_clients_T_11 = meta_clients & _final_meta_writeback_clients_T_10; // @[MSHR.scala:100:17, :245:{64,66}] wire _final_meta_writeback_clients_T_12 = meta_hit & _final_meta_writeback_clients_T_11; // @[MSHR.scala:100:17, :245:{40,64}] wire _final_meta_writeback_clients_T_13 = req_acquire & req_clientBit; // @[Parameters.scala:46:9] wire _final_meta_writeback_clients_T_14 = _final_meta_writeback_clients_T_12 | _final_meta_writeback_clients_T_13; // @[MSHR.scala:245:{40,84}, :246:40] assign final_meta_writeback_tag = request_prio_2 | request_control ? meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :215:38, :223:52, :228:53, :247:30] wire _final_meta_writeback_clients_T_15 = ~probes_toN; // @[MSHR.scala:151:23, :232:54, :258:54] wire _final_meta_writeback_clients_T_16 = meta_clients & _final_meta_writeback_clients_T_15; // @[MSHR.scala:100:17, :258:{52,54}] assign final_meta_writeback_hit = bad_grant ? meta_hit : request_prio_2 | ~request_control; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :227:34, :228:53, :234:30, :248:30, :251:20, :252:21] assign final_meta_writeback_dirty = ~bad_grant & (request_prio_2 ? _final_meta_writeback_dirty_T_1 : request_control ? ~meta_hit & meta_dirty : _final_meta_writeback_dirty_T_5); // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :224:{34,48}, :228:53, :229:21, :230:36, :236:{32,60}, :251:20, :252:21] assign final_meta_writeback_state = bad_grant ? {1'h0, meta_hit} : request_prio_2 ? _final_meta_writeback_state_T_3 : request_control ? (meta_hit ? 2'h0 : meta_state) : _final_meta_writeback_state_T_17; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :225:{34,40}, :228:53, :229:21, :231:36, :237:{32,38}, :251:20, :252:21, :257:36, :263:36] assign final_meta_writeback_clients = bad_grant ? meta_hit & _final_meta_writeback_clients_T_16 : request_prio_2 ? _final_meta_writeback_clients_T_7 : request_control ? (meta_hit ? _final_meta_writeback_clients_T_9 : meta_clients) : _final_meta_writeback_clients_T_14; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :226:{34,50}, :228:53, :229:21, :232:{36,52}, :245:{34,84}, :251:20, :252:21, :258:{36,52}, :264:36] wire _honour_BtoT_T = meta_clients & req_clientBit; // @[Parameters.scala:46:9] wire _honour_BtoT_T_1 = _honour_BtoT_T; // @[MSHR.scala:276:{47,64}] wire honour_BtoT = meta_hit & _honour_BtoT_T_1; // @[MSHR.scala:100:17, :276:{30,64}] wire _excluded_client_T_2 = &request_opcode; // @[Parameters.scala:271:52, :279:50] wire _excluded_client_T_3 = _excluded_client_T_1 | _excluded_client_T_2; // @[Parameters.scala:279:{12,40,50}] wire _excluded_client_T_4 = request_opcode == 3'h4; // @[Parameters.scala:279:87] wire _excluded_client_T_5 = _excluded_client_T_3 | _excluded_client_T_4; // @[Parameters.scala:279:{40,77,87}] wire _excluded_client_T_8 = _excluded_client_T_5; // @[Parameters.scala:279:{77,106}] wire [1:0] _io_schedule_bits_a_bits_param_T = meta_hit ? 2'h2 : 2'h1; // @[MSHR.scala:100:17, :282:56] wire [1:0] _io_schedule_bits_a_bits_param_T_1 = req_needT ? 
_io_schedule_bits_a_bits_param_T : 2'h0; // @[Parameters.scala:270:70] assign io_schedule_bits_a_bits_param_0 = {1'h0, _io_schedule_bits_a_bits_param_T_1}; // @[MSHR.scala:84:7, :282:{35,41}] wire _io_schedule_bits_a_bits_block_T = request_size != 3'h6; // @[MSHR.scala:98:20, :283:51] wire _io_schedule_bits_a_bits_block_T_1 = request_opcode == 3'h0; // @[MSHR.scala:98:20, :284:55] wire _io_schedule_bits_a_bits_block_T_2 = &request_opcode; // @[Parameters.scala:271:52] wire _io_schedule_bits_a_bits_block_T_3 = _io_schedule_bits_a_bits_block_T_1 | _io_schedule_bits_a_bits_block_T_2; // @[MSHR.scala:284:{55,71,89}] wire _io_schedule_bits_a_bits_block_T_4 = ~_io_schedule_bits_a_bits_block_T_3; // @[MSHR.scala:284:{38,71}] assign _io_schedule_bits_a_bits_block_T_5 = _io_schedule_bits_a_bits_block_T | _io_schedule_bits_a_bits_block_T_4; // @[MSHR.scala:283:{51,91}, :284:38] assign io_schedule_bits_a_bits_block_0 = _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:84:7, :283:91] wire _io_schedule_bits_b_bits_param_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31, :286:42] wire [1:0] _io_schedule_bits_b_bits_param_T_1 = req_needT ? 2'h2 : 2'h1; // @[Parameters.scala:270:70] wire [2:0] _io_schedule_bits_b_bits_param_T_2 = request_prio_1 ? request_param : {1'h0, _io_schedule_bits_b_bits_param_T_1}; // @[MSHR.scala:98:20, :286:{61,97}] assign _io_schedule_bits_b_bits_param_T_3 = _io_schedule_bits_b_bits_param_T ? 3'h2 : _io_schedule_bits_b_bits_param_T_2; // @[MSHR.scala:286:{41,42,61}] assign io_schedule_bits_b_bits_param_0 = _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:84:7, :286:41] wire _io_schedule_bits_b_bits_tag_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31, :287:42] assign _io_schedule_bits_b_bits_tag_T_1 = _io_schedule_bits_b_bits_tag_T ? meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :287:{41,42}] assign io_schedule_bits_b_bits_tag_0 = _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:84:7, :287:41] assign io_schedule_bits_b_bits_clients_0 = _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:84:7, :289:51] assign _io_schedule_bits_c_bits_opcode_T = {2'h3, meta_dirty}; // @[MSHR.scala:100:17, :290:41] assign io_schedule_bits_c_bits_opcode_0 = _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:84:7, :290:41] assign _io_schedule_bits_c_bits_param_T_1 = _io_schedule_bits_c_bits_param_T ? 3'h2 : 3'h1; // @[MSHR.scala:291:{41,53}] assign io_schedule_bits_c_bits_param_0 = _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:84:7, :291:41] wire _io_schedule_bits_d_bits_param_T = ~req_acquire; // @[MSHR.scala:219:53, :298:42] wire [1:0] _io_schedule_bits_d_bits_param_T_1 = {1'h0, req_promoteT}; // @[MSHR.scala:221:34, :300:53] wire [1:0] _io_schedule_bits_d_bits_param_T_2 = honour_BtoT ? 2'h2 : 2'h1; // @[MSHR.scala:276:30, :301:53] wire _io_schedule_bits_d_bits_param_T_3 = ~(|request_param); // @[Parameters.scala:271:89] wire [2:0] _io_schedule_bits_d_bits_param_T_4 = _io_schedule_bits_d_bits_param_T_3 ? {1'h0, _io_schedule_bits_d_bits_param_T_1} : request_param; // @[MSHR.scala:98:20, :299:79, :300:53] wire [2:0] _io_schedule_bits_d_bits_param_T_6 = _io_schedule_bits_d_bits_param_T_5 ? {1'h0, _io_schedule_bits_d_bits_param_T_2} : _io_schedule_bits_d_bits_param_T_4; // @[MSHR.scala:299:79, :301:53] wire [2:0] _io_schedule_bits_d_bits_param_T_8 = _io_schedule_bits_d_bits_param_T_7 ? 3'h1 : _io_schedule_bits_d_bits_param_T_6; // @[MSHR.scala:299:79] assign _io_schedule_bits_d_bits_param_T_9 = _io_schedule_bits_d_bits_param_T ? 
request_param : _io_schedule_bits_d_bits_param_T_8; // @[MSHR.scala:98:20, :298:{41,42}, :299:79] assign io_schedule_bits_d_bits_param_0 = _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:84:7, :298:41] wire _io_schedule_bits_dir_bits_data_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :310:42] assign _io_schedule_bits_dir_bits_data_T_1_dirty = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_dirty; // @[MSHR.scala:310:{41,42,71}] assign _io_schedule_bits_dir_bits_data_T_1_state = _io_schedule_bits_dir_bits_data_T ? 2'h0 : _io_schedule_bits_dir_bits_data_WIRE_state; // @[MSHR.scala:310:{41,42,71}] assign _io_schedule_bits_dir_bits_data_T_1_clients = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_clients; // @[MSHR.scala:310:{41,42,71}] assign _io_schedule_bits_dir_bits_data_T_1_tag = _io_schedule_bits_dir_bits_data_T ? 9'h0 : _io_schedule_bits_dir_bits_data_WIRE_tag; // @[MSHR.scala:310:{41,42,71}] assign io_schedule_bits_dir_bits_data_dirty_0 = _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:84:7, :310:41] assign io_schedule_bits_dir_bits_data_state_0 = _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:84:7, :310:41] assign io_schedule_bits_dir_bits_data_clients_0 = _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:84:7, :310:41] assign io_schedule_bits_dir_bits_data_tag_0 = _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:84:7, :310:41] wire _evict_T = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :338:32] wire [3:0] evict; // @[MSHR.scala:314:26] wire _evict_out_T = ~evict_c; // @[MSHR.scala:315:27, :318:32] wire [1:0] _GEN_6 = {1'h1, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32] wire [1:0] _evict_out_T_1; // @[MSHR.scala:319:32] assign _evict_out_T_1 = _GEN_6; // @[MSHR.scala:319:32] wire [1:0] _before_out_T_1; // @[MSHR.scala:319:32] assign _before_out_T_1 = _GEN_6; // @[MSHR.scala:319:32] wire _evict_T_3 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26] wire [2:0] _GEN_7 = {2'h2, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:39] wire [2:0] _evict_out_T_2; // @[MSHR.scala:320:39] assign _evict_out_T_2 = _GEN_7; // @[MSHR.scala:320:39] wire [2:0] _before_out_T_2; // @[MSHR.scala:320:39] assign _before_out_T_2 = _GEN_7; // @[MSHR.scala:320:39] wire [2:0] _GEN_8 = {2'h3, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:76] wire [2:0] _evict_out_T_3; // @[MSHR.scala:320:76] assign _evict_out_T_3 = _GEN_8; // @[MSHR.scala:320:76] wire [2:0] _before_out_T_3; // @[MSHR.scala:320:76] assign _before_out_T_3 = _GEN_8; // @[MSHR.scala:320:76] wire [2:0] _evict_out_T_4 = evict_c ? _evict_out_T_2 : _evict_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}] wire _evict_T_4 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26] wire _evict_T_5 = ~_evict_T; // @[MSHR.scala:323:11, :338:32] assign evict = _evict_T_5 ? 4'h8 : _evict_T_1 ? {3'h0, _evict_out_T} : _evict_T_2 ? {2'h0, _evict_out_T_1} : _evict_T_3 ? {1'h0, _evict_out_T_4} : {_evict_T_4, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}] wire [3:0] before_0; // @[MSHR.scala:314:26] wire _before_out_T = ~before_c; // @[MSHR.scala:315:27, :318:32] wire _before_T_2 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26] wire [2:0] _before_out_T_4 = before_c ? 
_before_out_T_2 : _before_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}] wire _before_T_3 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26] wire _before_T_4 = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :323:11] assign before_0 = _before_T_4 ? 4'h8 : _before_T ? {3'h0, _before_out_T} : _before_T_1 ? {2'h0, _before_out_T_1} : _before_T_2 ? {1'h0, _before_out_T_4} : {_before_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}] wire [3:0] after; // @[MSHR.scala:314:26] wire _GEN_9 = final_meta_writeback_state == 2'h1; // @[MSHR.scala:215:38, :317:26] wire _after_T; // @[MSHR.scala:317:26] assign _after_T = _GEN_9; // @[MSHR.scala:317:26] wire _prior_T; // @[MSHR.scala:317:26] assign _prior_T = _GEN_9; // @[MSHR.scala:317:26] wire _after_out_T = ~after_c; // @[MSHR.scala:315:27, :318:32] wire _GEN_10 = final_meta_writeback_state == 2'h2; // @[MSHR.scala:215:38, :317:26] wire _after_T_1; // @[MSHR.scala:317:26] assign _after_T_1 = _GEN_10; // @[MSHR.scala:317:26] wire _prior_T_1; // @[MSHR.scala:317:26] assign _prior_T_1 = _GEN_10; // @[MSHR.scala:317:26] wire [1:0] _GEN_11 = {1'h1, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32] wire [1:0] _after_out_T_1; // @[MSHR.scala:319:32] assign _after_out_T_1 = _GEN_11; // @[MSHR.scala:319:32] wire [1:0] _prior_out_T_1; // @[MSHR.scala:319:32] assign _prior_out_T_1 = _GEN_11; // @[MSHR.scala:319:32] wire _after_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26] wire [2:0] _GEN_12 = {2'h2, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:39] wire [2:0] _after_out_T_2; // @[MSHR.scala:320:39] assign _after_out_T_2 = _GEN_12; // @[MSHR.scala:320:39] wire [2:0] _prior_out_T_2; // @[MSHR.scala:320:39] assign _prior_out_T_2 = _GEN_12; // @[MSHR.scala:320:39] wire [2:0] _GEN_13 = {2'h3, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:76] wire [2:0] _after_out_T_3; // @[MSHR.scala:320:76] assign _after_out_T_3 = _GEN_13; // @[MSHR.scala:320:76] wire [2:0] _prior_out_T_3; // @[MSHR.scala:320:76] assign _prior_out_T_3 = _GEN_13; // @[MSHR.scala:320:76] wire [2:0] _after_out_T_4 = after_c ? _after_out_T_2 : _after_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}] wire _GEN_14 = final_meta_writeback_state == 2'h0; // @[MSHR.scala:215:38, :317:26] wire _after_T_3; // @[MSHR.scala:317:26] assign _after_T_3 = _GEN_14; // @[MSHR.scala:317:26] wire _prior_T_3; // @[MSHR.scala:317:26] assign _prior_T_3 = _GEN_14; // @[MSHR.scala:317:26] assign after = _after_T ? {3'h0, _after_out_T} : _after_T_1 ? {2'h0, _after_out_T_1} : _after_T_2 ? 
{1'h0, _after_out_T_4} : {_after_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26] wire probe_bit = io_sinkc_bits_source_0 == 6'h28; // @[Parameters.scala:46:9] wire _GEN_15 = probes_done | probe_bit; // @[Parameters.scala:46:9] wire _last_probe_T; // @[MSHR.scala:459:33] assign _last_probe_T = _GEN_15; // @[MSHR.scala:459:33] wire _probes_done_T; // @[MSHR.scala:467:32] assign _probes_done_T = _GEN_15; // @[MSHR.scala:459:33, :467:32] wire last_probe = _last_probe_T == _last_probe_T_2; // @[MSHR.scala:459:{33,46,64}] wire _probe_toN_T = io_sinkc_bits_param_0 == 3'h1; // @[Parameters.scala:282:11] wire _probe_toN_T_1 = io_sinkc_bits_param_0 == 3'h2; // @[Parameters.scala:282:43] wire _probe_toN_T_2 = _probe_toN_T | _probe_toN_T_1; // @[Parameters.scala:282:{11,34,43}] wire _probe_toN_T_3 = io_sinkc_bits_param_0 == 3'h5; // @[Parameters.scala:282:75] wire probe_toN = _probe_toN_T_2 | _probe_toN_T_3; // @[Parameters.scala:282:{34,66,75}] wire _probes_toN_T = probe_toN & probe_bit; // @[Parameters.scala:46:9] wire _probes_toN_T_1 = probes_toN | _probes_toN_T; // @[MSHR.scala:151:23, :468:{30,35}] wire _probes_noT_T = io_sinkc_bits_param_0 != 3'h3; // @[MSHR.scala:84:7, :469:53] wire _probes_noT_T_1 = probes_noT | _probes_noT_T; // @[MSHR.scala:152:23, :469:{30,53}] wire _w_rprobeackfirst_T = w_rprobeackfirst | last_probe; // @[MSHR.scala:122:33, :459:46, :470:42] wire _GEN_16 = last_probe & io_sinkc_bits_last_0; // @[MSHR.scala:84:7, :459:46, :471:55] wire _w_rprobeacklast_T; // @[MSHR.scala:471:55] assign _w_rprobeacklast_T = _GEN_16; // @[MSHR.scala:471:55] wire _w_pprobeacklast_T; // @[MSHR.scala:473:55] assign _w_pprobeacklast_T = _GEN_16; // @[MSHR.scala:471:55, :473:55] wire _w_rprobeacklast_T_1 = w_rprobeacklast | _w_rprobeacklast_T; // @[MSHR.scala:123:33, :471:{40,55}] wire _w_pprobeackfirst_T = w_pprobeackfirst | last_probe; // @[MSHR.scala:132:33, :459:46, :472:42] wire _w_pprobeacklast_T_1 = w_pprobeacklast | _w_pprobeacklast_T; // @[MSHR.scala:133:33, :473:{40,55}] wire _set_pprobeack_T = ~(|request_offset); // @[MSHR.scala:98:20, :475:77] wire _set_pprobeack_T_1 = io_sinkc_bits_last_0 | _set_pprobeack_T; // @[MSHR.scala:84:7, :475:{59,77}] wire set_pprobeack = last_probe & _set_pprobeack_T_1; // @[MSHR.scala:459:46, :475:{36,59}] wire _w_pprobeack_T = w_pprobeack | set_pprobeack; // @[MSHR.scala:134:33, :475:36, :476:32] wire _w_grant_T = ~(|request_offset); // @[MSHR.scala:98:20, :475:77, :490:33] wire _w_grant_T_1 = _w_grant_T | io_sinkd_bits_last_0; // @[MSHR.scala:84:7, :490:{33,41}] wire _gotT_T = io_sinkd_bits_param_0 == 3'h0; // @[MSHR.scala:84:7, :493:35] wire _new_meta_T = io_allocate_valid_0 & io_allocate_bits_repeat_0; // @[MSHR.scala:84:7, :505:40] wire new_meta_dirty = _new_meta_T ? final_meta_writeback_dirty : io_directory_bits_dirty_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire [1:0] new_meta_state = _new_meta_T ? final_meta_writeback_state : io_directory_bits_state_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire new_meta_clients = _new_meta_T ? final_meta_writeback_clients : io_directory_bits_clients_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire [8:0] new_meta_tag = _new_meta_T ? final_meta_writeback_tag : io_directory_bits_tag_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire new_meta_hit = _new_meta_T ? final_meta_writeback_hit : io_directory_bits_hit_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire [3:0] new_meta_way = _new_meta_T ? 
final_meta_writeback_way : io_directory_bits_way_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire new_request_prio_1 = io_allocate_valid_0 ? allocate_as_full_prio_1 : request_prio_1; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire new_request_prio_2 = io_allocate_valid_0 ? allocate_as_full_prio_2 : request_prio_2; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire new_request_control = io_allocate_valid_0 ? allocate_as_full_control : request_control; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [2:0] new_request_opcode = io_allocate_valid_0 ? allocate_as_full_opcode : request_opcode; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [2:0] new_request_param = io_allocate_valid_0 ? allocate_as_full_param : request_param; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [2:0] new_request_size = io_allocate_valid_0 ? allocate_as_full_size : request_size; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [5:0] new_request_source = io_allocate_valid_0 ? allocate_as_full_source : request_source; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [8:0] new_request_tag = io_allocate_valid_0 ? allocate_as_full_tag : request_tag; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [5:0] new_request_offset = io_allocate_valid_0 ? allocate_as_full_offset : request_offset; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [5:0] new_request_put = io_allocate_valid_0 ? allocate_as_full_put : request_put; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [10:0] new_request_set = io_allocate_valid_0 ? allocate_as_full_set : request_set; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire _new_needT_T = new_request_opcode[2]; // @[Parameters.scala:269:12] wire _new_needT_T_1 = ~_new_needT_T; // @[Parameters.scala:269:{5,12}] wire _GEN_17 = new_request_opcode == 3'h5; // @[Parameters.scala:270:13] wire _new_needT_T_2; // @[Parameters.scala:270:13] assign _new_needT_T_2 = _GEN_17; // @[Parameters.scala:270:13] wire _new_skipProbe_T_5; // @[Parameters.scala:279:117] assign _new_skipProbe_T_5 = _GEN_17; // @[Parameters.scala:270:13, :279:117] wire _new_needT_T_3 = new_request_param == 3'h1; // @[Parameters.scala:270:42] wire _new_needT_T_4 = _new_needT_T_2 & _new_needT_T_3; // @[Parameters.scala:270:{13,33,42}] wire _new_needT_T_5 = _new_needT_T_1 | _new_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33] wire _T_615 = new_request_opcode == 3'h6; // @[Parameters.scala:271:14] wire _new_needT_T_6; // @[Parameters.scala:271:14] assign _new_needT_T_6 = _T_615; // @[Parameters.scala:271:14] wire _new_skipProbe_T; // @[Parameters.scala:279:12] assign _new_skipProbe_T = _T_615; // @[Parameters.scala:271:14, :279:12] wire _new_needT_T_7 = &new_request_opcode; // @[Parameters.scala:271:52] wire _new_needT_T_8 = _new_needT_T_6 | _new_needT_T_7; // @[Parameters.scala:271:{14,42,52}] wire _new_needT_T_9 = |new_request_param; // @[Parameters.scala:271:89] wire _new_needT_T_10 = _new_needT_T_8 & _new_needT_T_9; // @[Parameters.scala:271:{42,80,89}] wire new_needT = _new_needT_T_5 | _new_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80] wire new_clientBit = new_request_source == 6'h28; // @[Parameters.scala:46:9] wire _new_skipProbe_T_1 = &new_request_opcode; // @[Parameters.scala:271:52, :279:50] wire _new_skipProbe_T_2 = _new_skipProbe_T | _new_skipProbe_T_1; // @[Parameters.scala:279:{12,40,50}] wire _new_skipProbe_T_3 = new_request_opcode == 3'h4; // @[Parameters.scala:279:87] wire _new_skipProbe_T_4 = _new_skipProbe_T_2 | _new_skipProbe_T_3; // 
@[Parameters.scala:279:{40,77,87}] wire _new_skipProbe_T_7 = _new_skipProbe_T_4; // @[Parameters.scala:279:{77,106}] wire new_skipProbe = _new_skipProbe_T_7 & new_clientBit; // @[Parameters.scala:46:9] wire [3:0] prior; // @[MSHR.scala:314:26] wire _prior_out_T = ~prior_c; // @[MSHR.scala:315:27, :318:32] wire _prior_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26] wire [2:0] _prior_out_T_4 = prior_c ? _prior_out_T_2 : _prior_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}] assign prior = _prior_T ? {3'h0, _prior_out_T} : _prior_T_1 ? {2'h0, _prior_out_T_1} : _prior_T_2 ? {1'h0, _prior_out_T_4} : {_prior_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26] wire _T_574 = io_directory_valid_0 | _new_meta_T; // @[MSHR.scala:84:7, :505:40, :539:28]
Generate the Verilog code corresponding to the following Chisel files.
File Scratchpad.scala:
package gemmini

import chisel3._
import chisel3.util._

import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp}
import freechips.rocketchip.rocket._
import freechips.rocketchip.tile._
import freechips.rocketchip.tilelink._

import Util._

class ScratchpadMemReadRequest[U <: Data](local_addr_t: LocalAddr, scale_t_bits: Int)(implicit p: Parameters) extends CoreBundle {
  val vaddr = UInt(coreMaxAddrBits.W)
  val laddr = local_addr_t.cloneType

  val cols = UInt(16.W) // TODO don't use a magic number for the width here
  val repeats = UInt(16.W) // TODO don't use a magic number for the width here
  val scale = UInt(scale_t_bits.W)
  val has_acc_bitwidth = Bool()
  val all_zeros = Bool()
  val block_stride = UInt(16.W) // TODO magic numbers
  val pixel_repeats = UInt(8.W) // TODO magic numbers
  val cmd_id = UInt(8.W) // TODO don't use a magic number here
  val status = new MStatus
}

class ScratchpadMemWriteRequest(local_addr_t: LocalAddr, acc_t_bits: Int, scale_t_bits: Int)
                               (implicit p: Parameters) extends CoreBundle {
  val vaddr = UInt(coreMaxAddrBits.W)
  val laddr = local_addr_t.cloneType

  val acc_act = UInt(Activation.bitwidth.W) // TODO don't use a magic number for the width here
  val acc_scale = UInt(scale_t_bits.W)
  val acc_igelu_qb = UInt(acc_t_bits.W)
  val acc_igelu_qc = UInt(acc_t_bits.W)
  val acc_iexp_qln2 = UInt(acc_t_bits.W)
  val acc_iexp_qln2_inv = UInt(acc_t_bits.W)
  val acc_norm_stats_id = UInt(8.W) // TODO magic number

  val len = UInt(16.W) // TODO don't use a magic number for the width here
  val block = UInt(8.W) // TODO don't use a magic number for the width here

  val cmd_id = UInt(8.W) // TODO don't use a magic number here
  val status = new MStatus

  // Pooling variables
  val pool_en = Bool()
  val store_en = Bool()
}

class ScratchpadMemWriteResponse extends Bundle {
  val cmd_id = UInt(8.W) // TODO don't use a magic number here
}

class ScratchpadMemReadResponse extends Bundle {
  val bytesRead = UInt(16.W) // TODO magic number here
  val cmd_id = UInt(8.W) // TODO don't use a magic number here
}

class ScratchpadReadMemIO[U <: Data](local_addr_t: LocalAddr, scale_t_bits: Int)(implicit p: Parameters) extends CoreBundle {
  val req = Decoupled(new ScratchpadMemReadRequest(local_addr_t, scale_t_bits))
  val resp = Flipped(Valid(new ScratchpadMemReadResponse))
}

class ScratchpadWriteMemIO(local_addr_t: LocalAddr, acc_t_bits: Int, scale_t_bits: Int)
                          (implicit p: Parameters) extends CoreBundle {
  val req = Decoupled(new ScratchpadMemWriteRequest(local_addr_t, acc_t_bits, scale_t_bits))
  val resp = Flipped(Valid(new ScratchpadMemWriteResponse))
}

class ScratchpadReadReq(val n: Int) extends Bundle {
  val addr = UInt(log2Ceil(n).W)
  val fromDMA = Bool()
}

class ScratchpadReadResp(val w: Int) extends Bundle {
  val data = UInt(w.W)
  val fromDMA = Bool()
}

class ScratchpadReadIO(val n: Int, val w: Int) extends Bundle {
  val req = Decoupled(new ScratchpadReadReq(n))
  val resp = Flipped(Decoupled(new ScratchpadReadResp(w)))
}

class ScratchpadWriteIO(val n: Int, val w: Int, val mask_len: Int) extends Bundle {
  val en = Output(Bool())
  val addr = Output(UInt(log2Ceil(n).W))
  val mask = Output(Vec(mask_len, Bool()))
  val data = Output(UInt(w.W))
}

class ScratchpadBank(n: Int, w: Int, aligned_to: Int, single_ported: Boolean, use_shared_ext_mem: Boolean, is_dummy: Boolean) extends Module {
  // This is essentially a pipelined SRAM with the ability to stall pipeline stages
  require(w % aligned_to == 0 || w < aligned_to)
  val mask_len = (w / (aligned_to * 8)) max 1 // How many mask bits are there?
  val mask_elem = UInt((w min (aligned_to * 8)).W) // What datatype does each mask bit correspond to?

  val io = IO(new Bundle {
    val read = Flipped(new ScratchpadReadIO(n, w))
    val write = Flipped(new ScratchpadWriteIO(n, w, mask_len))
    val ext_mem = if (use_shared_ext_mem) Some(new ExtMemIO) else None
  })

  val (read, write) = if (is_dummy) {
    def read(addr: UInt, ren: Bool): Data = 0.U
    def write(addr: UInt, wdata: Vec[UInt], wmask: Vec[Bool]): Unit = { }
    (read _, write _)
  } else if (use_shared_ext_mem) {
    def read(addr: UInt, ren: Bool): Data = {
      io.ext_mem.get.read_en := ren
      io.ext_mem.get.read_addr := addr
      io.ext_mem.get.read_data
    }
    io.ext_mem.get.write_en := false.B
    io.ext_mem.get.write_addr := DontCare
    io.ext_mem.get.write_data := DontCare
    io.ext_mem.get.write_mask := DontCare
    def write(addr: UInt, wdata: Vec[UInt], wmask: Vec[Bool]) = {
      io.ext_mem.get.write_en := true.B
      io.ext_mem.get.write_addr := addr
      io.ext_mem.get.write_data := wdata.asUInt
      io.ext_mem.get.write_mask := wmask.asUInt
    }
    (read _, write _)
  } else {
    val mem = SyncReadMem(n, Vec(mask_len, mask_elem))
    def read(addr: UInt, ren: Bool): Data = mem.read(addr, ren)
    def write(addr: UInt, wdata: Vec[UInt], wmask: Vec[Bool]) = mem.write(addr, wdata, wmask)
    (read _, write _)
  }

  // When the scratchpad is single-ported, the writes take precedence
  val singleport_busy_with_write = single_ported.B && io.write.en

  when (io.write.en) {
    if (aligned_to >= w)
      write(io.write.addr, io.write.data.asTypeOf(Vec(mask_len, mask_elem)), VecInit((~(0.U(mask_len.W))).asBools))
    else
      write(io.write.addr, io.write.data.asTypeOf(Vec(mask_len, mask_elem)), io.write.mask)
  }

  val raddr = io.read.req.bits.addr
  val ren = io.read.req.fire
  val rdata = if (single_ported) {
    assert(!(ren && io.write.en))
    read(raddr, ren && !io.write.en).asUInt
  } else {
    read(raddr, ren).asUInt
  }

  val fromDMA = io.read.req.bits.fromDMA

  // Make a queue which buffers the result of an SRAM read if it can't immediately be consumed
  val q = Module(new Queue(new ScratchpadReadResp(w), 1, true, true))
  q.io.enq.valid := RegNext(ren)
  q.io.enq.bits.data := rdata
  q.io.enq.bits.fromDMA := RegNext(fromDMA)

  val q_will_be_empty = (q.io.count +& q.io.enq.fire) - q.io.deq.fire === 0.U
  io.read.req.ready := q_will_be_empty && !singleport_busy_with_write

  io.read.resp <> q.io.deq
}

class Scratchpad[T <: Data, U <: Data, V <: Data](config: GemminiArrayConfig[T, U, V])
    (implicit p: Parameters, ev: Arithmetic[T]) extends LazyModule {

  import config._
  import ev._

  val maxBytes = dma_maxbytes
  val dataBits = dma_buswidth

  val block_rows = meshRows * tileRows
  val block_cols = meshColumns * tileColumns
  val spad_w = inputType.getWidth * block_cols
  val acc_w = accType.getWidth * block_cols

  val id_node = TLIdentityNode()
  val xbar_node = TLXbar()

  val reader = LazyModule(new StreamReader(config, max_in_flight_mem_reqs, dataBits, maxBytes,
    spad_w, acc_w, aligned_to, sp_banks * sp_bank_entries, acc_banks * acc_bank_entries,
    block_rows, use_tlb_register_filter, use_firesim_simulation_counters))
  val writer = LazyModule(new StreamWriter(max_in_flight_mem_reqs, dataBits, maxBytes,
    if (acc_read_full_width) acc_w else spad_w, aligned_to, inputType, block_cols,
    use_tlb_register_filter, use_firesim_simulation_counters))

  // TODO make a cross-bar vs two separate ports a config option
  // id_node :=* reader.node
  // id_node :=* writer.node

  xbar_node := TLBuffer() := reader.node // TODO
  xbar_node := TLBuffer() := writer.node
  id_node := TLWidthWidget(config.dma_buswidth/8) := TLBuffer() := xbar_node

  lazy val module = new Impl
  class Impl extends LazyModuleImp(this) with HasCoreParameters {
    val io = IO(new Bundle {
      // DMA ports
      val dma = new Bundle {
        val read = Flipped(new ScratchpadReadMemIO(local_addr_t, mvin_scale_t_bits))
        val write = Flipped(new ScratchpadWriteMemIO(local_addr_t, accType.getWidth, acc_scale_t_bits))
      }

      // SRAM ports
      val srams = new Bundle {
        val read = Flipped(Vec(sp_banks, new ScratchpadReadIO(sp_bank_entries, spad_w)))
        val write = Flipped(Vec(sp_banks, new ScratchpadWriteIO(sp_bank_entries, spad_w, (spad_w / (aligned_to * 8)) max 1)))
      }

      // Accumulator ports
      val acc = new Bundle {
        val read_req = Flipped(Vec(acc_banks, Decoupled(new AccumulatorReadReq(
          acc_bank_entries, accType, acc_scale_t.asInstanceOf[V]
        ))))

        val read_resp = Vec(acc_banks, Decoupled(new AccumulatorScaleResp(
          Vec(meshColumns, Vec(tileColumns, inputType)),
          Vec(meshColumns, Vec(tileColumns, accType))
        )))

        val write = Flipped(Vec(acc_banks, Decoupled(new AccumulatorWriteReq(
          acc_bank_entries, Vec(meshColumns, Vec(tileColumns, accType))
        ))))
      }

      val ext_mem = if (use_shared_ext_mem) {
        Some(new ExtSpadMemIO(sp_banks, acc_banks, acc_sub_banks))
      } else {
        None
      }

      // TLB ports
      val tlb = Vec(2, new FrontendTLBIO)

      // Misc. ports
      val busy = Output(Bool())
      val flush = Input(Bool())
      val counter = new CounterEventIO()
    })

    val write_dispatch_q = Queue(io.dma.write.req)

    // Write norm/scale queues are necessary to maintain in-order requests to accumulator norm/scale units
    // Writes from main SPAD just flow directly between scale_q and issue_q, while writes
    // From acc are ordered
    val write_norm_q = Module(new Queue(new ScratchpadMemWriteRequest(local_addr_t, accType.getWidth, acc_scale_t_bits), spad_read_delay+2))
    val write_scale_q = Module(new Queue(new ScratchpadMemWriteRequest(local_addr_t, accType.getWidth, acc_scale_t_bits), spad_read_delay+2))
    val write_issue_q = Module(new Queue(new ScratchpadMemWriteRequest(local_addr_t, accType.getWidth, acc_scale_t_bits), spad_read_delay+1, pipe=true))

    val read_issue_q = Module(new Queue(new ScratchpadMemReadRequest(local_addr_t, mvin_scale_t_bits), spad_read_delay+1, pipe=true)) // TODO can't this just be a normal queue?

    write_dispatch_q.ready := false.B

    write_norm_q.io.enq.valid := false.B
    write_norm_q.io.enq.bits := write_dispatch_q.bits
    write_norm_q.io.deq.ready := false.B

    write_scale_q.io.enq.valid := false.B
    write_scale_q.io.enq.bits := write_norm_q.io.deq.bits
    write_scale_q.io.deq.ready := false.B

    write_issue_q.io.enq.valid := false.B
    write_issue_q.io.enq.bits := write_scale_q.io.deq.bits

    // Garbage can immediately fire from dispatch_q -> norm_q
    when (write_dispatch_q.bits.laddr.is_garbage()) {
      write_norm_q.io.enq <> write_dispatch_q
    }

    // Non-acc or garbage can immediately fire between norm_q and scale_q
    when (write_norm_q.io.deq.bits.laddr.is_garbage() || !write_norm_q.io.deq.bits.laddr.is_acc_addr) {
      write_scale_q.io.enq <> write_norm_q.io.deq
    }

    // Non-acc or garbage can immediately fire between scale_q and issue_q
    when (write_scale_q.io.deq.bits.laddr.is_garbage() || !write_scale_q.io.deq.bits.laddr.is_acc_addr) {
      write_issue_q.io.enq <> write_scale_q.io.deq
    }

    val writeData = Wire(Valid(UInt((spad_w max acc_w).W)))
    writeData.valid := write_issue_q.io.deq.bits.laddr.is_garbage()
    writeData.bits := DontCare

    val fullAccWriteData = Wire(UInt(acc_w.W))
    fullAccWriteData := DontCare

    val writeData_is_full_width = !write_issue_q.io.deq.bits.laddr.is_garbage() &&
      write_issue_q.io.deq.bits.laddr.is_acc_addr && write_issue_q.io.deq.bits.laddr.read_full_acc_row
    val writeData_is_all_zeros = write_issue_q.io.deq.bits.laddr.is_garbage()

    writer.module.io.req.valid := write_issue_q.io.deq.valid && writeData.valid
    write_issue_q.io.deq.ready := writer.module.io.req.ready && writeData.valid
    writer.module.io.req.bits.vaddr := write_issue_q.io.deq.bits.vaddr
    writer.module.io.req.bits.len := Mux(writeData_is_full_width,
      write_issue_q.io.deq.bits.len * (accType.getWidth / 8).U,
      write_issue_q.io.deq.bits.len * (inputType.getWidth / 8).U)
    writer.module.io.req.bits.data := MuxCase(writeData.bits, Seq(
      writeData_is_all_zeros -> 0.U,
      writeData_is_full_width -> fullAccWriteData
    ))
    writer.module.io.req.bits.block := write_issue_q.io.deq.bits.block
    writer.module.io.req.bits.status := write_issue_q.io.deq.bits.status
    writer.module.io.req.bits.pool_en := write_issue_q.io.deq.bits.pool_en
    writer.module.io.req.bits.store_en := write_issue_q.io.deq.bits.store_en

    io.dma.write.resp.valid := false.B
    io.dma.write.resp.bits.cmd_id := write_dispatch_q.bits.cmd_id
    when (write_dispatch_q.bits.laddr.is_garbage() && write_dispatch_q.fire) {
      io.dma.write.resp.valid := true.B
    }

    read_issue_q.io.enq <> io.dma.read.req

    val zero_writer = Module(new ZeroWriter(config, new ScratchpadMemReadRequest(local_addr_t, mvin_scale_t_bits)))

    when (io.dma.read.req.bits.all_zeros) {
      read_issue_q.io.enq.valid := false.B
      io.dma.read.req.ready := zero_writer.io.req.ready
    }

    zero_writer.io.req.valid := io.dma.read.req.valid && io.dma.read.req.bits.all_zeros
    zero_writer.io.req.bits.laddr := io.dma.read.req.bits.laddr
    zero_writer.io.req.bits.cols := io.dma.read.req.bits.cols
    zero_writer.io.req.bits.block_stride := io.dma.read.req.bits.block_stride
    zero_writer.io.req.bits.tag := io.dma.read.req.bits

    val zero_writer_pixel_repeater = Module(new PixelRepeater(inputType, local_addr_t, block_cols, aligned_to, new ScratchpadMemReadRequest(local_addr_t, mvin_scale_t_bits), passthrough = !has_first_layer_optimizations))
    zero_writer_pixel_repeater.io.req.valid := zero_writer.io.resp.valid
    zero_writer_pixel_repeater.io.req.bits.in := 0.U.asTypeOf(Vec(block_cols, inputType))
    zero_writer_pixel_repeater.io.req.bits.laddr := zero_writer.io.resp.bits.laddr
    zero_writer_pixel_repeater.io.req.bits.len := zero_writer.io.resp.bits.tag.cols
    zero_writer_pixel_repeater.io.req.bits.pixel_repeats := zero_writer.io.resp.bits.tag.pixel_repeats
    zero_writer_pixel_repeater.io.req.bits.last := zero_writer.io.resp.bits.last
    zero_writer_pixel_repeater.io.req.bits.tag := zero_writer.io.resp.bits.tag
    zero_writer_pixel_repeater.io.req.bits.mask := {
      val n = inputType.getWidth / 8
      val mask = zero_writer.io.resp.bits.mask
      val expanded = VecInit(mask.flatMap(e => Seq.fill(n)(e)))
      expanded
    }

    zero_writer.io.resp.ready := zero_writer_pixel_repeater.io.req.ready
    zero_writer_pixel_repeater.io.resp.ready := false.B

    reader.module.io.req.valid := read_issue_q.io.deq.valid
    read_issue_q.io.deq.ready := reader.module.io.req.ready
    reader.module.io.req.bits.vaddr := read_issue_q.io.deq.bits.vaddr
    reader.module.io.req.bits.spaddr := Mux(read_issue_q.io.deq.bits.laddr.is_acc_addr,
      read_issue_q.io.deq.bits.laddr.full_acc_addr(), read_issue_q.io.deq.bits.laddr.full_sp_addr())
    reader.module.io.req.bits.len := read_issue_q.io.deq.bits.cols
    reader.module.io.req.bits.repeats := read_issue_q.io.deq.bits.repeats
    reader.module.io.req.bits.pixel_repeats := read_issue_q.io.deq.bits.pixel_repeats
    reader.module.io.req.bits.scale := read_issue_q.io.deq.bits.scale
    reader.module.io.req.bits.is_acc := read_issue_q.io.deq.bits.laddr.is_acc_addr
    reader.module.io.req.bits.accumulate := read_issue_q.io.deq.bits.laddr.accumulate
    reader.module.io.req.bits.has_acc_bitwidth := read_issue_q.io.deq.bits.has_acc_bitwidth
    reader.module.io.req.bits.block_stride := read_issue_q.io.deq.bits.block_stride
    reader.module.io.req.bits.status := read_issue_q.io.deq.bits.status
    reader.module.io.req.bits.cmd_id := read_issue_q.io.deq.bits.cmd_id

    val (mvin_scale_in, mvin_scale_out) = VectorScalarMultiplier(
      config.mvin_scale_args,
      config.inputType, config.meshColumns * config.tileColumns,
      chiselTypeOf(reader.module.io.resp.bits),
      is_acc = false
    )

    val (mvin_scale_acc_in, mvin_scale_acc_out) = if (mvin_scale_shared) (mvin_scale_in, mvin_scale_out) else (
      VectorScalarMultiplier(
        config.mvin_scale_acc_args,
        config.accType, config.meshColumns * config.tileColumns,
        chiselTypeOf(reader.module.io.resp.bits),
        is_acc = true
      )
    )

    mvin_scale_in.valid := reader.module.io.resp.valid && (mvin_scale_shared.B || !reader.module.io.resp.bits.is_acc ||
      (reader.module.io.resp.bits.is_acc && !reader.module.io.resp.bits.has_acc_bitwidth))
    mvin_scale_in.bits.in := reader.module.io.resp.bits.data.asTypeOf(chiselTypeOf(mvin_scale_in.bits.in))
    mvin_scale_in.bits.scale := reader.module.io.resp.bits.scale.asTypeOf(mvin_scale_t)
    mvin_scale_in.bits.repeats := reader.module.io.resp.bits.repeats
    mvin_scale_in.bits.pixel_repeats := reader.module.io.resp.bits.pixel_repeats
    mvin_scale_in.bits.last := reader.module.io.resp.bits.last
    mvin_scale_in.bits.tag := reader.module.io.resp.bits

    val mvin_scale_pixel_repeater = Module(new PixelRepeater(inputType, local_addr_t, block_cols, aligned_to, mvin_scale_out.bits.tag.cloneType, passthrough = !has_first_layer_optimizations))
    mvin_scale_pixel_repeater.io.req.valid := mvin_scale_out.valid
    mvin_scale_pixel_repeater.io.req.bits.in := mvin_scale_out.bits.out
    mvin_scale_pixel_repeater.io.req.bits.mask := mvin_scale_out.bits.tag.mask take mvin_scale_pixel_repeater.io.req.bits.mask.size
    mvin_scale_pixel_repeater.io.req.bits.laddr := mvin_scale_out.bits.tag.addr.asTypeOf(local_addr_t) + mvin_scale_out.bits.row
    mvin_scale_pixel_repeater.io.req.bits.len := mvin_scale_out.bits.tag.len
mvin_scale_pixel_repeater.io.req.bits.pixel_repeats := mvin_scale_out.bits.tag.pixel_repeats mvin_scale_pixel_repeater.io.req.bits.last := mvin_scale_out.bits.last mvin_scale_pixel_repeater.io.req.bits.tag := mvin_scale_out.bits.tag mvin_scale_out.ready := mvin_scale_pixel_repeater.io.req.ready mvin_scale_pixel_repeater.io.resp.ready := false.B if (!mvin_scale_shared) { mvin_scale_acc_in.valid := reader.module.io.resp.valid && (reader.module.io.resp.bits.is_acc && reader.module.io.resp.bits.has_acc_bitwidth) mvin_scale_acc_in.bits.in := reader.module.io.resp.bits.data.asTypeOf(chiselTypeOf(mvin_scale_acc_in.bits.in)) mvin_scale_acc_in.bits.scale := reader.module.io.resp.bits.scale.asTypeOf(mvin_scale_acc_t) mvin_scale_acc_in.bits.repeats := reader.module.io.resp.bits.repeats mvin_scale_acc_in.bits.pixel_repeats := 1.U mvin_scale_acc_in.bits.last := reader.module.io.resp.bits.last mvin_scale_acc_in.bits.tag := reader.module.io.resp.bits mvin_scale_acc_out.ready := false.B } reader.module.io.resp.ready := Mux(reader.module.io.resp.bits.is_acc && reader.module.io.resp.bits.has_acc_bitwidth, mvin_scale_acc_in.ready, mvin_scale_in.ready) val mvin_scale_finished = mvin_scale_pixel_repeater.io.resp.fire && mvin_scale_pixel_repeater.io.resp.bits.last val mvin_scale_acc_finished = mvin_scale_acc_out.fire && mvin_scale_acc_out.bits.last val zero_writer_finished = zero_writer_pixel_repeater.io.resp.fire && zero_writer_pixel_repeater.io.resp.bits.last val zero_writer_bytes_read = Mux(zero_writer_pixel_repeater.io.resp.bits.laddr.is_acc_addr, zero_writer_pixel_repeater.io.resp.bits.tag.cols * (accType.getWidth / 8).U, zero_writer_pixel_repeater.io.resp.bits.tag.cols * (inputType.getWidth / 8).U) // For DMA read responses, mvin_scale gets first priority, then mvin_scale_acc, and then zero_writer io.dma.read.resp.valid := mvin_scale_finished || mvin_scale_acc_finished || zero_writer_finished // io.dma.read.resp.bits.cmd_id := MuxCase(zero_writer.io.resp.bits.tag.cmd_id, Seq( io.dma.read.resp.bits.cmd_id := MuxCase(zero_writer_pixel_repeater.io.resp.bits.tag.cmd_id, Seq( // mvin_scale_finished -> mvin_scale_out.bits.tag.cmd_id, mvin_scale_finished -> mvin_scale_pixel_repeater.io.resp.bits.tag.cmd_id, mvin_scale_acc_finished -> mvin_scale_acc_out.bits.tag.cmd_id)) io.dma.read.resp.bits.bytesRead := MuxCase(zero_writer_bytes_read, Seq( // mvin_scale_finished -> mvin_scale_out.bits.tag.bytes_read, mvin_scale_finished -> mvin_scale_pixel_repeater.io.resp.bits.tag.bytes_read, mvin_scale_acc_finished -> mvin_scale_acc_out.bits.tag.bytes_read)) io.tlb(0) <> writer.module.io.tlb io.tlb(1) <> reader.module.io.tlb writer.module.io.flush := io.flush reader.module.io.flush := io.flush io.busy := writer.module.io.busy || reader.module.io.busy || write_issue_q.io.deq.valid || write_norm_q.io.deq.valid || write_scale_q.io.deq.valid || write_dispatch_q.valid val spad_mems = { val banks = Seq.fill(sp_banks) { Module(new ScratchpadBank( sp_bank_entries, spad_w, aligned_to, config.sp_singleported, use_shared_ext_mem, is_dummy )) } val bank_ios = VecInit(banks.map(_.io)) // Reading from the SRAM banks bank_ios.zipWithIndex.foreach { case (bio, i) => if (use_shared_ext_mem) { io.ext_mem.get.spad(i) <> bio.ext_mem.get } val ex_read_req = io.srams.read(i).req val exread = ex_read_req.valid // TODO we tie the write dispatch queue's, and write issue queue's, ready and valid signals together here val dmawrite = write_dispatch_q.valid && write_norm_q.io.enq.ready && !write_dispatch_q.bits.laddr.is_garbage() && !(bio.write.en && 
config.sp_singleported.B) && !write_dispatch_q.bits.laddr.is_acc_addr && write_dispatch_q.bits.laddr.sp_bank() === i.U bio.read.req.valid := exread || dmawrite ex_read_req.ready := bio.read.req.ready // The ExecuteController gets priority when reading from SRAMs when (exread) { bio.read.req.bits.addr := ex_read_req.bits.addr bio.read.req.bits.fromDMA := false.B }.elsewhen (dmawrite) { bio.read.req.bits.addr := write_dispatch_q.bits.laddr.sp_row() bio.read.req.bits.fromDMA := true.B when (bio.read.req.fire) { write_dispatch_q.ready := true.B write_norm_q.io.enq.valid := true.B io.dma.write.resp.valid := true.B } }.otherwise { bio.read.req.bits := DontCare } val dma_read_resp = Wire(Decoupled(new ScratchpadReadResp(spad_w))) dma_read_resp.valid := bio.read.resp.valid && bio.read.resp.bits.fromDMA dma_read_resp.bits := bio.read.resp.bits val ex_read_resp = Wire(Decoupled(new ScratchpadReadResp(spad_w))) ex_read_resp.valid := bio.read.resp.valid && !bio.read.resp.bits.fromDMA ex_read_resp.bits := bio.read.resp.bits val dma_read_pipe = Pipeline(dma_read_resp, spad_read_delay) val ex_read_pipe = Pipeline(ex_read_resp, spad_read_delay) bio.read.resp.ready := Mux(bio.read.resp.bits.fromDMA, dma_read_resp.ready, ex_read_resp.ready) dma_read_pipe.ready := writer.module.io.req.ready && !write_issue_q.io.deq.bits.laddr.is_acc_addr && write_issue_q.io.deq.bits.laddr.sp_bank() === i.U && // I believe we don't need to check that write_issue_q is valid here, because if the SRAM's resp is valid, then that means that the write_issue_q's deq should also be valid !write_issue_q.io.deq.bits.laddr.is_garbage() when (dma_read_pipe.fire) { writeData.valid := true.B writeData.bits := dma_read_pipe.bits.data } io.srams.read(i).resp <> ex_read_pipe } // Writing to the SRAM banks bank_ios.zipWithIndex.foreach { case (bio, i) => val exwrite = io.srams.write(i).en // val laddr = mvin_scale_out.bits.tag.addr.asTypeOf(local_addr_t) + mvin_scale_out.bits.row val laddr = mvin_scale_pixel_repeater.io.resp.bits.laddr // val dmaread = mvin_scale_out.valid && !mvin_scale_out.bits.tag.is_acc && val dmaread = mvin_scale_pixel_repeater.io.resp.valid && !mvin_scale_pixel_repeater.io.resp.bits.tag.is_acc && laddr.sp_bank() === i.U // We need to make sure that we don't try to return a dma read resp from both zero_writer and either mvin_scale // or mvin_acc_scale at the same time. 
The scalers always get priority in those cases /* val zerowrite = zero_writer.io.resp.valid && !zero_writer.io.resp.bits.laddr.is_acc_addr && zero_writer.io.resp.bits.laddr.sp_bank() === i.U && */ val zerowrite = zero_writer_pixel_repeater.io.resp.valid && !zero_writer_pixel_repeater.io.resp.bits.laddr.is_acc_addr && zero_writer_pixel_repeater.io.resp.bits.laddr.sp_bank() === i.U && // !((mvin_scale_out.valid && mvin_scale_out.bits.last) || (mvin_scale_acc_out.valid && mvin_scale_acc_out.bits.last)) !((mvin_scale_pixel_repeater.io.resp.valid && mvin_scale_pixel_repeater.io.resp.bits.last) || (mvin_scale_acc_out.valid && mvin_scale_acc_out.bits.last)) bio.write.en := exwrite || dmaread || zerowrite when (exwrite) { bio.write.addr := io.srams.write(i).addr bio.write.data := io.srams.write(i).data bio.write.mask := io.srams.write(i).mask }.elsewhen (dmaread) { bio.write.addr := laddr.sp_row() bio.write.data := mvin_scale_pixel_repeater.io.resp.bits.out.asUInt bio.write.mask := mvin_scale_pixel_repeater.io.resp.bits.mask take ((spad_w / (aligned_to * 8)) max 1) mvin_scale_pixel_repeater.io.resp.ready := true.B // TODO we combinationally couple valid and ready signals }.elsewhen (zerowrite) { bio.write.addr := zero_writer_pixel_repeater.io.resp.bits.laddr.sp_row() bio.write.data := 0.U bio.write.mask := zero_writer_pixel_repeater.io.resp.bits.mask zero_writer_pixel_repeater.io.resp.ready := true.B // TODO we combinationally couple valid and ready signals }.otherwise { bio.write.addr := DontCare bio.write.data := DontCare bio.write.mask := DontCare } } banks } val acc_row_t = Vec(meshColumns, Vec(tileColumns, accType)) val spad_row_t = Vec(meshColumns, Vec(tileColumns, inputType)) val (acc_norm_unit_in, acc_norm_unit_out) = Normalizer( is_passthru = !config.has_normalizations, max_len = block_cols, num_reduce_lanes = -1, num_stats = 2, latency = 4, fullDataType = acc_row_t, scale_t = acc_scale_t, ) acc_norm_unit_in.valid := false.B acc_norm_unit_in.bits.len := write_norm_q.io.deq.bits.len acc_norm_unit_in.bits.stats_id := write_norm_q.io.deq.bits.acc_norm_stats_id acc_norm_unit_in.bits.cmd := write_norm_q.io.deq.bits.laddr.norm_cmd acc_norm_unit_in.bits.acc_read_resp := DontCare val acc_scale_unit = Module(new AccumulatorScale( acc_row_t, spad_row_t, acc_scale_t.asInstanceOf[V], acc_read_small_width, acc_read_full_width, acc_scale_func, acc_scale_num_units, acc_scale_latency, has_nonlinear_activations, has_normalizations, )) val acc_waiting_to_be_scaled = write_scale_q.io.deq.valid && !write_scale_q.io.deq.bits.laddr.is_garbage() && write_scale_q.io.deq.bits.laddr.is_acc_addr && write_issue_q.io.enq.ready acc_norm_unit_out.ready := acc_scale_unit.io.in.ready && acc_waiting_to_be_scaled acc_scale_unit.io.in.valid := acc_norm_unit_out.valid && acc_waiting_to_be_scaled acc_scale_unit.io.in.bits := acc_norm_unit_out.bits when (acc_scale_unit.io.in.fire) { write_issue_q.io.enq <> write_scale_q.io.deq } acc_scale_unit.io.out.ready := false.B val dma_resp_ready = writer.module.io.req.ready && write_issue_q.io.deq.bits.laddr.is_acc_addr && !write_issue_q.io.deq.bits.laddr.is_garbage() when (acc_scale_unit.io.out.bits.fromDMA && dma_resp_ready) { // Send the acc-scale result into the DMA acc_scale_unit.io.out.ready := true.B writeData.valid := acc_scale_unit.io.out.valid writeData.bits := acc_scale_unit.io.out.bits.data.asUInt fullAccWriteData := acc_scale_unit.io.out.bits.full_data.asUInt } for (i <- 0 until acc_banks) { // Send the acc-sccale result to the ExController io.acc.read_resp(i).valid := 
false.B io.acc.read_resp(i).bits := acc_scale_unit.io.out.bits when (!acc_scale_unit.io.out.bits.fromDMA && acc_scale_unit.io.out.bits.acc_bank_id === i.U) { acc_scale_unit.io.out.ready := io.acc.read_resp(i).ready io.acc.read_resp(i).valid := acc_scale_unit.io.out.valid } } val acc_adders = Module(new AccPipeShared(acc_latency-1, acc_row_t, acc_banks)) val acc_mems = { val banks = Seq.fill(acc_banks) { Module(new AccumulatorMem( acc_bank_entries, acc_row_t, acc_scale_func, acc_scale_t.asInstanceOf[V], acc_singleported, acc_sub_banks, use_shared_ext_mem, acc_latency, accType, is_dummy )) } val bank_ios = VecInit(banks.map(_.io)) // Getting the output of the bank that's about to be issued to the writer val bank_issued_io = bank_ios(write_issue_q.io.deq.bits.laddr.acc_bank()) // Reading from the Accumulator banks bank_ios.zipWithIndex.foreach { case (bio, i) => if (use_shared_ext_mem) { io.ext_mem.get.acc(i) <> bio.ext_mem.get } acc_adders.io.in_sel(i) := bio.adder.valid acc_adders.io.ina(i) := bio.adder.op1 acc_adders.io.inb(i) := bio.adder.op2 bio.adder.sum := acc_adders.io.out val ex_read_req = io.acc.read_req(i) val exread = ex_read_req.valid // TODO we tie the write dispatch queue's, and write issue queue's, ready and valid signals together here val dmawrite = write_dispatch_q.valid && write_norm_q.io.enq.ready && !write_dispatch_q.bits.laddr.is_garbage() && write_dispatch_q.bits.laddr.is_acc_addr && write_dispatch_q.bits.laddr.acc_bank() === i.U bio.read.req.valid := exread || dmawrite ex_read_req.ready := bio.read.req.ready // The ExecuteController gets priority when reading from accumulator banks when (exread) { bio.read.req.bits.addr := ex_read_req.bits.addr bio.read.req.bits.act := ex_read_req.bits.act bio.read.req.bits.igelu_qb := ex_read_req.bits.igelu_qb bio.read.req.bits.igelu_qc := ex_read_req.bits.igelu_qc bio.read.req.bits.iexp_qln2 := ex_read_req.bits.iexp_qln2 bio.read.req.bits.iexp_qln2_inv := ex_read_req.bits.iexp_qln2_inv bio.read.req.bits.scale := ex_read_req.bits.scale bio.read.req.bits.full := false.B bio.read.req.bits.fromDMA := false.B }.elsewhen (dmawrite) { bio.read.req.bits.addr := write_dispatch_q.bits.laddr.acc_row() bio.read.req.bits.full := write_dispatch_q.bits.laddr.read_full_acc_row bio.read.req.bits.act := write_dispatch_q.bits.acc_act bio.read.req.bits.igelu_qb := write_dispatch_q.bits.acc_igelu_qb.asTypeOf(bio.read.req.bits.igelu_qb) bio.read.req.bits.igelu_qc := write_dispatch_q.bits.acc_igelu_qc.asTypeOf(bio.read.req.bits.igelu_qc) bio.read.req.bits.iexp_qln2 := write_dispatch_q.bits.acc_iexp_qln2.asTypeOf(bio.read.req.bits.iexp_qln2) bio.read.req.bits.iexp_qln2_inv := write_dispatch_q.bits.acc_iexp_qln2_inv.asTypeOf(bio.read.req.bits.iexp_qln2_inv) bio.read.req.bits.scale := write_dispatch_q.bits.acc_scale.asTypeOf(bio.read.req.bits.scale) bio.read.req.bits.fromDMA := true.B when (bio.read.req.fire) { write_dispatch_q.ready := true.B write_norm_q.io.enq.valid := true.B io.dma.write.resp.valid := true.B } }.otherwise { bio.read.req.bits := DontCare } bio.read.resp.ready := false.B when (write_norm_q.io.deq.valid && acc_norm_unit_in.ready && bio.read.resp.valid && write_scale_q.io.enq.ready && write_norm_q.io.deq.bits.laddr.is_acc_addr && !write_norm_q.io.deq.bits.laddr.is_garbage() && write_norm_q.io.deq.bits.laddr.acc_bank() === i.U) { write_norm_q.io.deq.ready := true.B acc_norm_unit_in.valid := true.B bio.read.resp.ready := true.B // Some normalizer commands don't write to main memory, so they don't need to be passed on to the scaling units 
write_scale_q.io.enq.valid := NormCmd.writes_to_main_memory(write_norm_q.io.deq.bits.laddr.norm_cmd) acc_norm_unit_in.bits.acc_read_resp := bio.read.resp.bits acc_norm_unit_in.bits.acc_read_resp.acc_bank_id := i.U } } // Writing to the accumulator banks bank_ios.zipWithIndex.foreach { case (bio, i) => // Order of precedence during writes is ExecuteController, and then mvin_scale, and then mvin_scale_acc, and // then zero_writer val exwrite = io.acc.write(i).valid io.acc.write(i).ready := true.B assert(!(exwrite && !bio.write.ready), "Execute controller write to AccumulatorMem was skipped") // val from_mvin_scale = mvin_scale_out.valid && mvin_scale_out.bits.tag.is_acc val from_mvin_scale = mvin_scale_pixel_repeater.io.resp.valid && mvin_scale_pixel_repeater.io.resp.bits.tag.is_acc val from_mvin_scale_acc = mvin_scale_acc_out.valid && mvin_scale_acc_out.bits.tag.is_acc // val mvin_scale_laddr = mvin_scale_out.bits.tag.addr.asTypeOf(local_addr_t) + mvin_scale_out.bits.row val mvin_scale_laddr = mvin_scale_pixel_repeater.io.resp.bits.laddr val mvin_scale_acc_laddr = mvin_scale_acc_out.bits.tag.addr.asTypeOf(local_addr_t) + mvin_scale_acc_out.bits.row val dmaread_bank = Mux(from_mvin_scale, mvin_scale_laddr.acc_bank(), mvin_scale_acc_laddr.acc_bank()) val dmaread_row = Mux(from_mvin_scale, mvin_scale_laddr.acc_row(), mvin_scale_acc_laddr.acc_row()) // We need to make sure that we don't try to return a dma read resp from both mvin_scale and mvin_scale_acc // at the same time. mvin_scale always gets priority in this cases val spad_last = mvin_scale_pixel_repeater.io.resp.valid && mvin_scale_pixel_repeater.io.resp.bits.last && !mvin_scale_pixel_repeater.io.resp.bits.tag.is_acc val dmaread = (from_mvin_scale || from_mvin_scale_acc) && dmaread_bank === i.U /* && (mvin_scale_same.B || from_mvin_scale || !spad_dmaread_last) */ // We need to make sure that we don't try to return a dma read resp from both zero_writer and either mvin_scale // or mvin_acc_scale at the same time. 
The scalers always get priority in those cases /* val zerowrite = zero_writer.io.resp.valid && zero_writer.io.resp.bits.laddr.is_acc_addr && zero_writer.io.resp.bits.laddr.acc_bank() === i.U && */ val zerowrite = zero_writer_pixel_repeater.io.resp.valid && zero_writer_pixel_repeater.io.resp.bits.laddr.is_acc_addr && zero_writer_pixel_repeater.io.resp.bits.laddr.acc_bank() === i.U && // !((mvin_scale_out.valid && mvin_scale_out.bits.last) || (mvin_scale_acc_out.valid && mvin_scale_acc_out.bits.last)) !((mvin_scale_pixel_repeater.io.resp.valid && mvin_scale_pixel_repeater.io.resp.bits.last) || (mvin_scale_acc_out.valid && mvin_scale_acc_out.bits.last)) val consecutive_write_block = RegInit(false.B) if (acc_singleported) { val consecutive_write_sub_bank = RegInit(0.U((1 max log2Ceil(acc_sub_banks)).W)) when (bio.write.fire && bio.write.bits.acc && (bio.write.bits.addr(log2Ceil(acc_sub_banks)-1,0) === consecutive_write_sub_bank)) { consecutive_write_block := true.B } .elsewhen (bio.write.fire && bio.write.bits.acc) { consecutive_write_block := false.B consecutive_write_sub_bank := bio.write.bits.addr(log2Ceil(acc_sub_banks)-1,0) } .otherwise { consecutive_write_block := false.B } } bio.write.valid := false.B // bio.write.bits.acc := MuxCase(zero_writer.io.resp.bits.laddr.accumulate, bio.write.bits.acc := MuxCase(zero_writer_pixel_repeater.io.resp.bits.laddr.accumulate, Seq(exwrite -> io.acc.write(i).bits.acc, // from_mvin_scale -> mvin_scale_out.bits.tag.accumulate, from_mvin_scale -> mvin_scale_pixel_repeater.io.resp.bits.tag.accumulate, from_mvin_scale_acc -> mvin_scale_acc_out.bits.tag.accumulate)) // bio.write.bits.addr := MuxCase(zero_writer.io.resp.bits.laddr.acc_row(), bio.write.bits.addr := MuxCase(zero_writer_pixel_repeater.io.resp.bits.laddr.acc_row(), Seq(exwrite -> io.acc.write(i).bits.addr, (from_mvin_scale || from_mvin_scale_acc) -> dmaread_row)) when (exwrite) { bio.write.valid := true.B bio.write.bits.data := io.acc.write(i).bits.data bio.write.bits.mask := io.acc.write(i).bits.mask }.elsewhen (dmaread && !spad_last && !consecutive_write_block) { bio.write.valid := true.B bio.write.bits.data := Mux(from_mvin_scale, // VecInit(mvin_scale_out.bits.out.map(e => e.withWidthOf(accType))).asTypeOf(acc_row_t), VecInit(mvin_scale_pixel_repeater.io.resp.bits.out.map(e => e.withWidthOf(accType))).asTypeOf(acc_row_t), mvin_scale_acc_out.bits.out.asTypeOf(acc_row_t)) bio.write.bits.mask := Mux(from_mvin_scale, { val n = accType.getWidth / inputType.getWidth // val mask = mvin_scale_out.bits.tag.mask take ((spad_w / (aligned_to * 8)) max 1) val mask = mvin_scale_pixel_repeater.io.resp.bits.mask take ((spad_w / (aligned_to * 8)) max 1) val expanded = VecInit(mask.flatMap(e => Seq.fill(n)(e))) expanded }, mvin_scale_acc_out.bits.tag.mask) when(from_mvin_scale) { mvin_scale_pixel_repeater.io.resp.ready := bio.write.ready }.otherwise { mvin_scale_acc_out.ready := bio.write.ready } }.elsewhen (zerowrite && !spad_last && !consecutive_write_block) { bio.write.valid := true.B bio.write.bits.data := 0.U.asTypeOf(acc_row_t) bio.write.bits.mask := { val n = accType.getWidth / inputType.getWidth val mask = zero_writer_pixel_repeater.io.resp.bits.mask val expanded = VecInit(mask.flatMap(e => Seq.fill(n)(e))) expanded } zero_writer_pixel_repeater.io.resp.ready := bio.write.ready }.otherwise { bio.write.bits.data := DontCare bio.write.bits.mask := DontCare } } banks } // Counter connection io.counter := DontCare io.counter.collect(reader.module.io.counter) io.counter.collect(writer.module.io.counter) } }
module mem_2( // @[Scratchpad.scala:132:26] input [11:0] RW0_addr, input RW0_en, input RW0_clk, input RW0_wmode, input [127:0] RW0_wdata, output [127:0] RW0_rdata, input [15:0] RW0_wmask ); mem_ext mem_ext ( // @[Scratchpad.scala:132:26] .RW0_addr (RW0_addr), .RW0_en (RW0_en), .RW0_clk (RW0_clk), .RW0_wmode (RW0_wmode), .RW0_wdata (RW0_wdata), .RW0_rdata (RW0_rdata), .RW0_wmask (RW0_wmask) ); // @[Scratchpad.scala:132:26] endmodule
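Note: mem_2 above is the lowered form of the masked SyncReadMem inside one ScratchpadBank. Below is a minimal Chisel sketch (illustration only, not the Gemmini source) of a memory with the same shape; the depth (4096 entries), width (128 bits) and 16 byte lanes are assumptions read off the RW0_* port widths, and the sketch only demonstrates the single-ported, write-preferred access pattern.

// Minimal sketch (illustration only): a single-ported, byte-masked SyncReadMem
// with the same shape as mem_2 above. Dimensions are assumptions taken from the
// RW0_* port widths, not from the configuration in the source.
import chisel3._
import chisel3.util._

class MaskedSpadBankSketch(n: Int = 4096, w: Int = 128) extends Module {
  val bytes = w / 8
  val io = IO(new Bundle {
    val ren   = Input(Bool())
    val wen   = Input(Bool())
    val addr  = Input(UInt(log2Ceil(n).W))
    val wdata = Input(UInt(w.W))
    val wmask = Input(UInt(bytes.W))
    val rdata = Output(UInt(w.W))
  })
  val mem = SyncReadMem(n, Vec(bytes, UInt(8.W)))
  // Writes win on the shared port, mirroring the single-ported ScratchpadBank
  when (io.wen) {
    mem.write(io.addr, io.wdata.asTypeOf(Vec(bytes, UInt(8.W))), io.wmask.asBools)
  }
  io.rdata := mem.read(io.addr, io.ren && !io.wen).asUInt
}

When a masked, single-ported SyncReadMem like this is lowered, the tool typically emits a wrapper module that instantiates an external memory macro (here mem_ext) with a combined read/write port and a per-lane write mask, which is what the RW0_wmode and RW0_wmask ports express.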
Generate the Verilog code corresponding to the following Chisel files. File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. 
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. */ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File MixedNode.scala: package org.chipsalliance.diplomacy.nodes import chisel3.{Data, DontCare, Wire} import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.{Field, Parameters} import org.chipsalliance.diplomacy.ValName import org.chipsalliance.diplomacy.sourceLine /** One side metadata of a [[Dangle]]. * * Describes one side of an edge going into or out of a [[BaseNode]]. 
* * @param serial * the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to. * @param index * the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to. */ case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] { import scala.math.Ordered.orderingToOrdered def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that)) } /** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]] * connects. * * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] , * [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]]. * * @param source * the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within * that [[BaseNode]]. * @param sink * sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that * [[BaseNode]]. * @param flipped * flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to * `danglesIn`. * @param dataOpt * actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module */ case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) { def data = dataOpt.get } /** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often * derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually * implement the protocol. */ case class Edges[EI, EO](in: Seq[EI], out: Seq[EO]) /** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */ case object MonitorsEnabled extends Field[Boolean](true) /** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented. * * For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but * [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink * nodes, flipping the rendering of one node's edge will usual produce a more concise visual layout for the * [[LazyModule]]. */ case object RenderFlipped extends Field[Boolean](false) /** The sealed node class in the package, all node are derived from it. * * @param inner * Sink interface implementation. * @param outer * Source interface implementation. * @param valName * val name of this node. * @tparam DI * Downward-flowing parameters received on the inner side of the node. It is usually a brunch of parameters * describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected * [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port * parameters. * @tparam UI * Upward-flowing parameters generated by the inner side of the node. It is usually a brunch of parameters describing * the protocol parameters of a sink. For an [[InwardNode]], it is determined itself. * @tparam EI * Edge Parameters describing a connection on the inner side of the node. It is usually a brunch of transfers * specified for a sink according to protocol. * @tparam BI * Bundle type used when connecting to the inner side of the node. 
It is a hardware interface of this sink interface. * It should extends from [[chisel3.Data]], which represents the real hardware. * @tparam DO * Downward-flowing parameters generated on the outer side of the node. It is usually a brunch of parameters * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined itself. * @tparam UO * Upward-flowing parameters received by the outer side of the node. It is usually a brunch of parameters describing * the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]]. * Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters. * @tparam EO * Edge Parameters describing a connection on the outer side of the node. It is usually a brunch of transfers * specified for a source according to protocol. * @tparam BO * Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source * interface. It should extends from [[chisel3.Data]], which represents the real hardware. * * @note * Call Graph of [[MixedNode]] * - line `─`: source is process by a function and generate pass to others * - Arrow `→`: target of arrow is generated by source * * {{{ * (from the other node) * ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐ * ↓ │ * (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │ * [[InwardNode.accPI]] │ │ │ * │ │ (based on protocol) │ * │ │ [[MixedNode.inner.edgeI]] │ * │ │ ↓ │ * ↓ │ │ │ * (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │ * [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │ * │ │ ↑ │ │ │ * │ │ │ [[OutwardNode.doParams]] │ │ * │ │ │ (from the other node) │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * │ │ │ └────────┬──────────────┤ │ * │ │ │ │ │ │ * │ │ │ │ (based on protocol) │ * │ │ │ │ [[MixedNode.inner.edgeI]] │ * │ │ │ │ │ │ * │ │ (from the other node) │ ↓ │ * │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │ * │ ↑ ↑ │ │ ↓ │ * │ │ │ │ │ [[MixedNode.in]] │ * │ │ │ │ ↓ ↑ │ * │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │ * ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │ * │ │ │ [[MixedNode.bundleOut]]─┐ │ │ * │ │ │ ↑ ↓ │ │ * │ │ │ │ [[MixedNode.out]] │ │ * │ ↓ ↓ │ ↑ │ │ * │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │ * │ │ (from the other node) ↑ │ │ * │ │ │ │ │ │ * │ │ │ [[MixedNode.outer.edgeO]] │ │ * │ │ │ (based on protocol) │ │ * │ │ │ │ │ │ * │ │ │ ┌────────────────────────────────────────┤ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * (immobilize after elaboration)│ ↓ │ │ │ │ * [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │ * ↑ (inward port from [[OutwardNode]]) │ │ │ │ * │ ┌─────────────────────────────────────────┤ │ │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * [[OutwardNode.accPO]] │ ↓ │ │ │ * (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │ * │ ↑ │ │ * │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │ * └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘ * }}} */ abstract class MixedNode[DI, UI, EI, BI <: Data, 
DO, UO, EO, BO <: Data]( val inner: InwardNodeImp[DI, UI, EI, BI], val outer: OutwardNodeImp[DO, UO, EO, BO] )( implicit valName: ValName) extends BaseNode with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO] with InwardNode[DI, UI, BI] with OutwardNode[DO, UO, BO] { // Generate a [[NodeHandle]] with inward and outward node are both this node. val inward = this val outward = this /** Debug info of nodes binding. */ def bindingInfo: String = s"""$iBindingInfo |$oBindingInfo |""".stripMargin /** Debug info of ports connecting. */ def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}] |${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}] |""".stripMargin /** Debug info of parameters propagations. */ def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}] |${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}] |${diParams.size} downstream inward parameters: [${diParams.mkString(",")}] |${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}] |""".stripMargin /** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and * [[MixedNode.iPortMapping]]. * * Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward * stars and outward stars. * * This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type * of node. * * @param iKnown * Number of known-size ([[BIND_ONCE]]) input bindings. * @param oKnown * Number of known-size ([[BIND_ONCE]]) output bindings. * @param iStar * Number of unknown size ([[BIND_STAR]]) input bindings. * @param oStar * Number of unknown size ([[BIND_STAR]]) output bindings. * @return * A Tuple of the resolved number of input and output connections. */ protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) /** Function to generate downward-flowing outward params from the downward-flowing input params and the current output * ports. * * @param n * The size of the output sequence to generate. * @param p * Sequence of downward-flowing input parameters of this node. * @return * A `n`-sized sequence of downward-flowing output edge parameters. */ protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]]. * * @param n * Size of the output sequence. * @param p * Upward-flowing output edge parameters. * @return * A n-sized sequence of upward-flowing input edge parameters. */ protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] /** @return * The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with * [[BIND_STAR]]. */ protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR) /** @return * The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of * output bindings bound with [[BIND_STAR]]. */ protected[diplomacy] lazy val sourceCard: Int = iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR) /** @return list of nodes involved in flex bindings with this node. 
*/ protected[diplomacy] lazy val flexes: Seq[BaseNode] = oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2) /** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin * greedily taking up the remaining connections. * * @return * A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return * value is not relevant. */ protected[diplomacy] lazy val flexOffset: Int = { /** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex * operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a * connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of * each node in the current set and decide whether they should be added to the set or not. * * @return * the mapping of [[BaseNode]] indexed by their serial numbers. */ def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = { if (visited.contains(v.serial) || !v.flexibleArityDirection) { visited } else { v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum)) } } /** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node. * * @example * {{{ * a :*=* b :*=* c * d :*=* b * e :*=* f * }}} * * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)` */ val flexSet = DFS(this, Map()).values /** The total number of :*= operators where we're on the left. */ val allSink = flexSet.map(_.sinkCard).sum /** The total number of :=* operators used when we're on the right. */ val allSource = flexSet.map(_.sourceCard).sum require( allSink == 0 || allSource == 0, s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction." ) allSink - allSource } /** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */ protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = { if (flexibleArityDirection) flexOffset else if (n.flexibleArityDirection) n.flexOffset else 0 } /** For a node which is connected between two nodes, select the one that will influence the direction of the flex * resolution. */ protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = { val dir = edgeArityDirection(n) if (dir < 0) l else if (dir > 0) r else 1 } /** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */ private var starCycleGuard = false /** Resolve all the star operators into concrete indicies. As connections are being made, some may be "star" * connections which need to be resolved. In some way to determine how many actual edges they correspond to. We also * need to build up the ranges of edges which correspond to each binding operator, so that We can apply the correct * edge parameters and later build up correct bundle connections. * * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding * operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort * (binding operator). 
[[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= * bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N` */ protected[diplomacy] lazy val ( oPortMapping: Seq[(Int, Int)], iPortMapping: Seq[(Int, Int)], oStar: Int, iStar: Int ) = { try { if (starCycleGuard) throw StarCycleException() starCycleGuard = true // For a given node N... // Number of foo :=* N // + Number of bar :=* foo :*=* N val oStars = oBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0) } // Number of N :*= foo // + Number of N :*=* foo :*= bar val iStars = iBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0) } // 1 for foo := N // + bar.iStar for bar :*= foo :*=* N // + foo.iStar for foo :*= N // + 0 for foo :=* N val oKnown = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, 0, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => 0 } }.sum // 1 for N := foo // + bar.oStar for N :*=* foo :=* bar // + foo.oStar for N :=* foo // + 0 for N :*= foo val iKnown = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, 0) case BIND_QUERY => n.oStar case BIND_STAR => 0 } }.sum // Resolve star depends on the node subclass to implement the algorithm for this. val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars) // Cumulative list of resolved outward binding range starting points val oSum = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => oStar } }.scanLeft(0)(_ + _) // Cumulative list of resolved inward binding range starting points val iSum = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar) case BIND_QUERY => n.oStar case BIND_STAR => iStar } }.scanLeft(0)(_ + _) // Create ranges for each binding based on the running sums and return // those along with resolved values for the star operations. (oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar) } catch { case c: StarCycleException => throw c.copy(loop = context +: c.loop) } } /** Sequence of inward ports. * * This should be called after all star bindings are resolved. * * Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. * `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this * connection was made in the source code. */ protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oBindings.flatMap { case (i, n, _, p, s) => // for each binding operator in this node, look at what it connects to val (start, end) = n.iPortMapping(i) (start until end).map { j => (j, n, p, s) } } /** Sequence of outward ports. * * This should be called after all star bindings are resolved. * * `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of * outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection * was made in the source code. 
*/ protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iBindings.flatMap { case (i, n, _, p, s) => // query this port index range of this node in the other side of node. val (start, end) = n.oPortMapping(i) (start until end).map { j => (j, n, p, s) } } // Ephemeral nodes ( which have non-None iForward/oForward) have in_degree = out_degree // Thus, there must exist an Eulerian path and the below algorithms terminate @scala.annotation.tailrec private def oTrace( tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) ): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.iForward(i) match { case None => (i, n, p, s) case Some((j, m)) => oTrace((j, m, p, s)) } } @scala.annotation.tailrec private def iTrace( tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) ): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.oForward(i) match { case None => (i, n, p, s) case Some((j, m)) => iTrace((j, m, p, s)) } } /** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - Numeric index of this binding in the [[InwardNode]] on the other end. * - [[InwardNode]] on the other end of this binding. * - A view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace) /** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - numeric index of this binding in [[OutwardNode]] on the other end. * - [[OutwardNode]] on the other end of this binding. * - a view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace) private var oParamsCycleGuard = false protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) } protected[diplomacy] lazy val doParams: Seq[DO] = { try { if (oParamsCycleGuard) throw DownwardCycleException() oParamsCycleGuard = true val o = mapParamsD(oPorts.size, diParams) require( o.size == oPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of outward ports should equal the number of produced outward parameters. |$context |$connectedPortsInfo |Downstreamed inward parameters: [${diParams.mkString(",")}] |Produced outward parameters: [${o.mkString(",")}] |""".stripMargin ) o.map(outer.mixO(_, this)) } catch { case c: DownwardCycleException => throw c.copy(loop = context +: c.loop) } } private var iParamsCycleGuard = false protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) } protected[diplomacy] lazy val uiParams: Seq[UI] = { try { if (iParamsCycleGuard) throw UpwardCycleException() iParamsCycleGuard = true val i = mapParamsU(iPorts.size, uoParams) require( i.size == iPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of inward ports should equal the number of produced inward parameters. 
|$context |$connectedPortsInfo |Upstreamed outward parameters: [${uoParams.mkString(",")}] |Produced inward parameters: [${i.mkString(",")}] |""".stripMargin ) i.map(inner.mixI(_, this)) } catch { case c: UpwardCycleException => throw c.copy(loop = context +: c.loop) } } /** Outward edge parameters. */ protected[diplomacy] lazy val edgesOut: Seq[EO] = (oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) } /** Inward edge parameters. */ protected[diplomacy] lazy val edgesIn: Seq[EI] = (iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) } /** A tuple of the input edge parameters and output edge parameters for the edges bound to this node. * * If you need to access to the edges of a foreign Node, use this method (in/out create bundles). */ lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut) /** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */ protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e => val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } /** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */ protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e => val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(serial, i), sink = HalfEdge(n.serial, j), flipped = false, name = wirePrefix + "out", dataOpt = None ) } private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(n.serial, j), sink = HalfEdge(serial, i), flipped = true, name = wirePrefix + "in", dataOpt = None ) } /** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */ protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleOut(i))) } /** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */ protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleIn(i))) } private[diplomacy] var instantiated = false /** Gather Bundle and edge parameters of outward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def out: Seq[(BO, EO)] = { require( instantiated, s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleOut.zip(edgesOut) } /** Gather Bundle and edge parameters of inward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. 
*/ def in: Seq[(BI, EI)] = { require( instantiated, s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleIn.zip(edgesIn) } /** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires, * instantiate monitors on all input ports if appropriate, and return all the dangles of this node. */ protected[diplomacy] def instantiate(): Seq[Dangle] = { instantiated = true if (!circuitIdentity) { (iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) } } danglesOut ++ danglesIn } protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn /** Connects the outward part of a node with the inward part of this node. */ protected[diplomacy] def bind( h: OutwardNode[DI, UI, BI], binding: NodeBinding )( implicit p: Parameters, sourceInfo: SourceInfo ): Unit = { val x = this // x := y val y = h sourceLine(sourceInfo, " at ", "") val i = x.iPushed val o = y.oPushed y.oPush( i, x, binding match { case BIND_ONCE => BIND_ONCE case BIND_FLEX => BIND_FLEX case BIND_STAR => BIND_QUERY case BIND_QUERY => BIND_STAR } ) x.iPush(o, y, binding) } /* Metadata for printing the node graph. */ def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) => val re = inner.render(e) (n, re.copy(flipped = re.flipped != p(RenderFlipped))) } /** Metadata for printing the node graph */ def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) } } File TLPrefetcher.scala: package barf import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.{Field, Parameters} import freechips.rocketchip.diplomacy._ import freechips.rocketchip.util._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.subsystem._ case class TLPrefetcherParams( prefetchIds: Int = 4, prefetcher: String => Option[CanInstantiatePrefetcher] = _ => None ) case object TLPrefetcherKey extends Field[TLPrefetcherParams](TLPrefetcherParams()) class TLPrefetcher(implicit p: Parameters) extends LazyModule { val params = p(TLPrefetcherKey) def mapInputIds(masters: Seq[TLMasterParameters]) = TLXbar.assignRanges(masters.map(_.sourceId.size + params.prefetchIds)) val node = TLAdapterNode( clientFn = { cp => cp.v1copy(clients = (mapInputIds(cp.clients) zip cp.clients).map { case (range, c) => c.v1copy( sourceId = range )}) }, managerFn = { mp => mp } ) // Handle size = 1 gracefully (Chisel3 empty range is broken) def trim(id: UInt, size: Int): UInt = if (size <= 1) 0.U else id(log2Ceil(size)-1, 0) lazy val module = new LazyModuleImp(this) { (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => val nClients = edgeOut.master.clients.size val outIdMap = edgeOut.master.clients.map(_.sourceId) val inIdMap = edgeIn.master.clients.map(_.sourceId) val snoop = Wire(Valid(new Snoop)) val snoop_client = Wire(UInt(log2Ceil(nClients).W)) // Implement prefetchers per client. 
val prefetchers = edgeOut.master.clients.zipWithIndex.map { case (c,i) => val pParams = params.prefetcher(c.name).getOrElse(NullPrefetcherParams()) println(s"Prefetcher for ${c.name}: ${pParams.desc}") val prefetcher = pParams.instantiate() prefetcher.io.snoop.valid := snoop.valid && snoop_client === i.U prefetcher.io.snoop.bits := snoop.bits prefetcher } val out_arb = Module(new RRArbiter(new Prefetch, nClients)) out_arb.io.in <> prefetchers.map(_.io.request) val tracker = RegInit(0.U(params.prefetchIds.W)) val next_tracker = PriorityEncoder(~tracker) val tracker_free = !tracker(next_tracker) def inIdAdjuster(source: UInt) = Mux1H((inIdMap zip outIdMap).map { case (i,o) => i.contains(source) -> (o.start.U | (source - i.start.U)) }) def outIdAdjuster(source: UInt) = Mux1H((inIdMap zip outIdMap).map { case (i,o) => o.contains(source) -> (trim(source - o.start.U, i.size) + i.start.U) }) def outIdToPrefetchId(source: UInt) = Mux1H((inIdMap zip outIdMap).map { case (i,o) => o.contains(source) -> trim(source - (o.start + i.size).U, params.prefetchIds) }) def prefetchIdToOutId(source: UInt, client: UInt) = Mux1H((inIdMap zip outIdMap).zipWithIndex.map { case ((i,o),id) => (id.U === client) -> ((o.start + i.size).U +& source) }) def inIdToClientId(source: UInt) = Mux1H(inIdMap.zipWithIndex.map { case (i,id) => i.contains(source) -> id.U }) out <> in out.a.bits.source := inIdAdjuster(in.a.bits.source) in.b.bits.source := outIdAdjuster(out.b.bits.source) out.c.bits.source := inIdAdjuster(in.c.bits.source) val d_is_prefetch = out.d.bits.opcode === TLMessages.HintAck in.d.valid := out.d.valid && !d_is_prefetch when (d_is_prefetch) { out.d.ready := true.B } in.d.bits.source := outIdAdjuster(out.d.bits.source) tracker := (tracker ^ ((out.d.valid && d_is_prefetch) << outIdToPrefetchId(out.d.bits.source)) ^ ((out.a.fire && !in.a.valid) << next_tracker) ) snoop.valid := in.a.fire && edgeIn.manager.supportsAcquireBFast(in.a.bits.address, log2Ceil(p(CacheBlockBytes)).U) snoop.bits.address := in.a.bits.address val acq = in.a.bits.opcode.isOneOf(TLMessages.AcquireBlock, TLMessages.AcquirePerm) val toT = in.a.bits.param.isOneOf(TLPermissions.NtoT, TLPermissions.BtoT) val put = edgeIn.hasData(in.a.bits) snoop.bits.write := put || (acq && toT) snoop_client := inIdToClientId(in.a.bits.source) val legal_address = edgeOut.manager.findSafe(out_arb.io.out.bits.block_address).reduce(_||_) val (legal, hint) = edgeOut.Hint( prefetchIdToOutId(next_tracker, out_arb.io.chosen), out_arb.io.out.bits.block_address, log2Up(p(CacheBlockBytes)).U, Mux(out_arb.io.out.bits.write, TLHints.PREFETCH_WRITE, TLHints.PREFETCH_READ) ) out_arb.io.out.ready := false.B when (!in.a.valid) { out.a.valid := out_arb.io.out.valid && tracker_free && legal && legal_address out.a.bits := hint out_arb.io.out.ready := out.a.ready } when (!legal || !legal_address) { out_arb.io.out.ready := true.B } } } } object TLPrefetcher { def apply()(implicit p: Parameters) = { val prefetcher = LazyModule(new TLPrefetcher) prefetcher.node } } case class TilePrefetchingMasterPortParams(tileId: Int, base: HierarchicalElementPortParamsLike) extends HierarchicalElementPortParamsLike { val where = base.where def injectNode(context: Attachable)(implicit p: Parameters): TLNode = { TLPrefetcher() :*=* base.injectNode(context)(p) } }
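// Aside on the source-ID scheme above: TLPrefetcher widens each client's outward
// source-ID range by prefetchIds, keeps the low part of each range for demand
// traffic, and uses the extra IDs for its own Hint requests, remapping sources in
// both directions. The sketch below is a hypothetical, software-only Scala model
// of that arithmetic (not part of the design); it assumes simple contiguous range
// assignment, whereas the real code uses TLXbar.assignRanges, which may align and
// pad the ranges.
object PrefetchIdModel {
  final case class IdRange(start: Int, size: Int) {
    def contains(id: Int): Boolean = id >= start && id < start + size
  }

  // Lay out contiguous ranges for the given per-client sizes.
  def assignRanges(sizes: Seq[Int]): Seq[IdRange] =
    sizes.scanLeft(0)(_ + _).zip(sizes).map { case (start, size) => IdRange(start, size) }

  // Demand request: inward source -> outward source (same offset, new base),
  // mirroring inIdAdjuster above.
  def inIdAdjuster(inR: Seq[IdRange], outR: Seq[IdRange], src: Int): Int = {
    val i = inR.indexWhere(_.contains(src))
    require(i >= 0, s"source $src is not owned by any client")
    outR(i).start + (src - inR(i).start)
  }

  // Prefetch Hint: prefetch slot `slot` of client `client` -> outward source just
  // above that client's demand IDs, mirroring prefetchIdToOutId above.
  def prefetchIdToOutId(inR: Seq[IdRange], outR: Seq[IdRange], slot: Int, client: Int): Int =
    outR(client).start + inR(client).size + slot

  def main(args: Array[String]): Unit = {
    val prefetchIds = 4
    val inSizes  = Seq(4, 2)                               // two clients with 4 and 2 source IDs
    val inR  = assignRanges(inSizes)                       // [0,4), [4,6)
    val outR = assignRanges(inSizes.map(_ + prefetchIds))  // [0,8), [8,14)
    println(inIdAdjuster(inR, outR, 5))                    // client 1, offset 1 -> 9
    println(prefetchIdToOutId(inR, outR, slot = 2, client = 0)) // 0 + 4 + 2 = 6
  }
}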
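// Similarly, the `tracker` register above is an occupancy bit-vector over the
// prefetchIds Hint IDs: a bit is set (via XOR) when a prefetch Hint wins the A
// channel in an idle cycle, and cleared when the matching HintAck returns on D.
// A minimal software sketch of that bookkeeping, again hypothetical and not part
// of the design:
final class PrefetchTrackerModel(prefetchIds: Int) {
  private var tracker: Long = 0L                       // bit i set => prefetch slot i in flight

  def nextFree: Int = (0 until prefetchIds).indexWhere(i => (tracker & (1L << i)) == 0L)
  def hasFree: Boolean = nextFree >= 0                 // analogous to tracker_free

  def issueHint(): Int = {                             // a Hint fires on out.a
    val slot = nextFree
    require(slot >= 0, "no free prefetch IDs")
    tracker ^= 1L << slot                              // set the occupancy bit
    slot
  }

  def hintAck(slot: Int): Unit =                       // matching HintAck seen on out.d
    tracker ^= 1L << slot                              // clear the occupancy bit

  def inFlight: Int = java.lang.Long.bitCount(tracker)
}
// e.g. val t = new PrefetchTrackerModel(4); val s = t.issueHint(); t.hintAck(s)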
module TLInterconnectCoupler_sbus_from_rockettile( // @[LazyModuleImp.scala:138:7] input clock, // @[LazyModuleImp.scala:138:7] input reset, // @[LazyModuleImp.scala:138:7] output auto_tl_master_clock_xing_in_a_ready, // @[LazyModuleImp.scala:107:25] input auto_tl_master_clock_xing_in_a_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_tl_master_clock_xing_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_tl_master_clock_xing_in_a_bits_param, // @[LazyModuleImp.scala:107:25] input [3:0] auto_tl_master_clock_xing_in_a_bits_size, // @[LazyModuleImp.scala:107:25] input [2:0] auto_tl_master_clock_xing_in_a_bits_source, // @[LazyModuleImp.scala:107:25] input [31:0] auto_tl_master_clock_xing_in_a_bits_address, // @[LazyModuleImp.scala:107:25] input [7:0] auto_tl_master_clock_xing_in_a_bits_mask, // @[LazyModuleImp.scala:107:25] input [63:0] auto_tl_master_clock_xing_in_a_bits_data, // @[LazyModuleImp.scala:107:25] input auto_tl_master_clock_xing_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_tl_master_clock_xing_in_b_ready, // @[LazyModuleImp.scala:107:25] output auto_tl_master_clock_xing_in_b_valid, // @[LazyModuleImp.scala:107:25] output [1:0] auto_tl_master_clock_xing_in_b_bits_param, // @[LazyModuleImp.scala:107:25] output [31:0] auto_tl_master_clock_xing_in_b_bits_address, // @[LazyModuleImp.scala:107:25] output auto_tl_master_clock_xing_in_c_ready, // @[LazyModuleImp.scala:107:25] input auto_tl_master_clock_xing_in_c_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_tl_master_clock_xing_in_c_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_tl_master_clock_xing_in_c_bits_param, // @[LazyModuleImp.scala:107:25] input [3:0] auto_tl_master_clock_xing_in_c_bits_size, // @[LazyModuleImp.scala:107:25] input [2:0] auto_tl_master_clock_xing_in_c_bits_source, // @[LazyModuleImp.scala:107:25] input [31:0] auto_tl_master_clock_xing_in_c_bits_address, // @[LazyModuleImp.scala:107:25] input [63:0] auto_tl_master_clock_xing_in_c_bits_data, // @[LazyModuleImp.scala:107:25] input auto_tl_master_clock_xing_in_c_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_tl_master_clock_xing_in_d_ready, // @[LazyModuleImp.scala:107:25] output auto_tl_master_clock_xing_in_d_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_tl_master_clock_xing_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25] output [1:0] auto_tl_master_clock_xing_in_d_bits_param, // @[LazyModuleImp.scala:107:25] output [3:0] auto_tl_master_clock_xing_in_d_bits_size, // @[LazyModuleImp.scala:107:25] output [2:0] auto_tl_master_clock_xing_in_d_bits_source, // @[LazyModuleImp.scala:107:25] output [2:0] auto_tl_master_clock_xing_in_d_bits_sink, // @[LazyModuleImp.scala:107:25] output auto_tl_master_clock_xing_in_d_bits_denied, // @[LazyModuleImp.scala:107:25] output [63:0] auto_tl_master_clock_xing_in_d_bits_data, // @[LazyModuleImp.scala:107:25] output auto_tl_master_clock_xing_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_tl_master_clock_xing_in_e_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_tl_master_clock_xing_in_e_bits_sink, // @[LazyModuleImp.scala:107:25] input auto_tl_out_a_ready, // @[LazyModuleImp.scala:107:25] output auto_tl_out_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_tl_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_tl_out_a_bits_param, // @[LazyModuleImp.scala:107:25] output [3:0] auto_tl_out_a_bits_size, // @[LazyModuleImp.scala:107:25] output [4:0] auto_tl_out_a_bits_source, // 
@[LazyModuleImp.scala:107:25] output [31:0] auto_tl_out_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_tl_out_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_tl_out_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_tl_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_tl_out_b_ready, // @[LazyModuleImp.scala:107:25] input auto_tl_out_b_valid, // @[LazyModuleImp.scala:107:25] input [1:0] auto_tl_out_b_bits_param, // @[LazyModuleImp.scala:107:25] input [31:0] auto_tl_out_b_bits_address, // @[LazyModuleImp.scala:107:25] input auto_tl_out_c_ready, // @[LazyModuleImp.scala:107:25] output auto_tl_out_c_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_tl_out_c_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_tl_out_c_bits_param, // @[LazyModuleImp.scala:107:25] output [3:0] auto_tl_out_c_bits_size, // @[LazyModuleImp.scala:107:25] output [4:0] auto_tl_out_c_bits_source, // @[LazyModuleImp.scala:107:25] output [31:0] auto_tl_out_c_bits_address, // @[LazyModuleImp.scala:107:25] output [63:0] auto_tl_out_c_bits_data, // @[LazyModuleImp.scala:107:25] output auto_tl_out_c_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_tl_out_d_ready, // @[LazyModuleImp.scala:107:25] input auto_tl_out_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_tl_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [1:0] auto_tl_out_d_bits_param, // @[LazyModuleImp.scala:107:25] input [3:0] auto_tl_out_d_bits_size, // @[LazyModuleImp.scala:107:25] input [4:0] auto_tl_out_d_bits_source, // @[LazyModuleImp.scala:107:25] input [2:0] auto_tl_out_d_bits_sink, // @[LazyModuleImp.scala:107:25] input auto_tl_out_d_bits_denied, // @[LazyModuleImp.scala:107:25] input [63:0] auto_tl_out_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_tl_out_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_tl_out_e_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_tl_out_e_bits_sink // @[LazyModuleImp.scala:107:25] ); wire no_bufferOut_d_valid; // @[MixedNode.scala:542:17] wire no_bufferOut_d_bits_corrupt; // @[MixedNode.scala:542:17] wire [63:0] no_bufferOut_d_bits_data; // @[MixedNode.scala:542:17] wire no_bufferOut_d_bits_denied; // @[MixedNode.scala:542:17] wire [2:0] no_bufferOut_d_bits_sink; // @[MixedNode.scala:542:17] wire [2:0] no_bufferOut_d_bits_source; // @[MixedNode.scala:542:17] wire [3:0] no_bufferOut_d_bits_size; // @[MixedNode.scala:542:17] wire [1:0] no_bufferOut_d_bits_param; // @[MixedNode.scala:542:17] wire [2:0] no_bufferOut_d_bits_opcode; // @[MixedNode.scala:542:17] wire no_bufferOut_c_ready; // @[MixedNode.scala:542:17] wire no_bufferOut_b_valid; // @[MixedNode.scala:542:17] wire [31:0] no_bufferOut_b_bits_address; // @[MixedNode.scala:542:17] wire [1:0] no_bufferOut_b_bits_param; // @[MixedNode.scala:542:17] wire no_bufferOut_a_ready; // @[MixedNode.scala:542:17] wire auto_tl_master_clock_xing_in_a_valid_0 = auto_tl_master_clock_xing_in_a_valid; // @[LazyModuleImp.scala:138:7] wire [2:0] auto_tl_master_clock_xing_in_a_bits_opcode_0 = auto_tl_master_clock_xing_in_a_bits_opcode; // @[LazyModuleImp.scala:138:7] wire [2:0] auto_tl_master_clock_xing_in_a_bits_param_0 = auto_tl_master_clock_xing_in_a_bits_param; // @[LazyModuleImp.scala:138:7] wire [3:0] auto_tl_master_clock_xing_in_a_bits_size_0 = auto_tl_master_clock_xing_in_a_bits_size; // @[LazyModuleImp.scala:138:7] wire [2:0] auto_tl_master_clock_xing_in_a_bits_source_0 = auto_tl_master_clock_xing_in_a_bits_source; // @[LazyModuleImp.scala:138:7] 
wire [31:0] auto_tl_master_clock_xing_in_a_bits_address_0 = auto_tl_master_clock_xing_in_a_bits_address; // @[LazyModuleImp.scala:138:7] wire [7:0] auto_tl_master_clock_xing_in_a_bits_mask_0 = auto_tl_master_clock_xing_in_a_bits_mask; // @[LazyModuleImp.scala:138:7] wire [63:0] auto_tl_master_clock_xing_in_a_bits_data_0 = auto_tl_master_clock_xing_in_a_bits_data; // @[LazyModuleImp.scala:138:7] wire auto_tl_master_clock_xing_in_a_bits_corrupt_0 = auto_tl_master_clock_xing_in_a_bits_corrupt; // @[LazyModuleImp.scala:138:7] wire auto_tl_master_clock_xing_in_b_ready_0 = auto_tl_master_clock_xing_in_b_ready; // @[LazyModuleImp.scala:138:7] wire auto_tl_master_clock_xing_in_c_valid_0 = auto_tl_master_clock_xing_in_c_valid; // @[LazyModuleImp.scala:138:7] wire [2:0] auto_tl_master_clock_xing_in_c_bits_opcode_0 = auto_tl_master_clock_xing_in_c_bits_opcode; // @[LazyModuleImp.scala:138:7] wire [2:0] auto_tl_master_clock_xing_in_c_bits_param_0 = auto_tl_master_clock_xing_in_c_bits_param; // @[LazyModuleImp.scala:138:7] wire [3:0] auto_tl_master_clock_xing_in_c_bits_size_0 = auto_tl_master_clock_xing_in_c_bits_size; // @[LazyModuleImp.scala:138:7] wire [2:0] auto_tl_master_clock_xing_in_c_bits_source_0 = auto_tl_master_clock_xing_in_c_bits_source; // @[LazyModuleImp.scala:138:7] wire [31:0] auto_tl_master_clock_xing_in_c_bits_address_0 = auto_tl_master_clock_xing_in_c_bits_address; // @[LazyModuleImp.scala:138:7] wire [63:0] auto_tl_master_clock_xing_in_c_bits_data_0 = auto_tl_master_clock_xing_in_c_bits_data; // @[LazyModuleImp.scala:138:7] wire auto_tl_master_clock_xing_in_c_bits_corrupt_0 = auto_tl_master_clock_xing_in_c_bits_corrupt; // @[LazyModuleImp.scala:138:7] wire auto_tl_master_clock_xing_in_d_ready_0 = auto_tl_master_clock_xing_in_d_ready; // @[LazyModuleImp.scala:138:7] wire auto_tl_master_clock_xing_in_e_valid_0 = auto_tl_master_clock_xing_in_e_valid; // @[LazyModuleImp.scala:138:7] wire [2:0] auto_tl_master_clock_xing_in_e_bits_sink_0 = auto_tl_master_clock_xing_in_e_bits_sink; // @[LazyModuleImp.scala:138:7] wire auto_tl_out_a_ready_0 = auto_tl_out_a_ready; // @[LazyModuleImp.scala:138:7] wire auto_tl_out_b_valid_0 = auto_tl_out_b_valid; // @[LazyModuleImp.scala:138:7] wire [1:0] auto_tl_out_b_bits_param_0 = auto_tl_out_b_bits_param; // @[LazyModuleImp.scala:138:7] wire [31:0] auto_tl_out_b_bits_address_0 = auto_tl_out_b_bits_address; // @[LazyModuleImp.scala:138:7] wire auto_tl_out_c_ready_0 = auto_tl_out_c_ready; // @[LazyModuleImp.scala:138:7] wire auto_tl_out_d_valid_0 = auto_tl_out_d_valid; // @[LazyModuleImp.scala:138:7] wire [2:0] auto_tl_out_d_bits_opcode_0 = auto_tl_out_d_bits_opcode; // @[LazyModuleImp.scala:138:7] wire [1:0] auto_tl_out_d_bits_param_0 = auto_tl_out_d_bits_param; // @[LazyModuleImp.scala:138:7] wire [3:0] auto_tl_out_d_bits_size_0 = auto_tl_out_d_bits_size; // @[LazyModuleImp.scala:138:7] wire [4:0] auto_tl_out_d_bits_source_0 = auto_tl_out_d_bits_source; // @[LazyModuleImp.scala:138:7] wire [2:0] auto_tl_out_d_bits_sink_0 = auto_tl_out_d_bits_sink; // @[LazyModuleImp.scala:138:7] wire auto_tl_out_d_bits_denied_0 = auto_tl_out_d_bits_denied; // @[LazyModuleImp.scala:138:7] wire [63:0] auto_tl_out_d_bits_data_0 = auto_tl_out_d_bits_data; // @[LazyModuleImp.scala:138:7] wire auto_tl_out_d_bits_corrupt_0 = auto_tl_out_d_bits_corrupt; // @[LazyModuleImp.scala:138:7] wire [4:0] auto_tl_out_b_bits_source = 5'h10; // @[MixedNode.scala:542:17, :551:17] wire [4:0] tlOut_b_bits_source = 5'h10; // @[MixedNode.scala:542:17, :551:17] wire [4:0] tlIn_b_bits_source = 
5'h10; // @[MixedNode.scala:542:17, :551:17] wire auto_tl_master_clock_xing_in_e_ready = 1'h1; // @[MixedNode.scala:542:17, :551:17] wire auto_tl_out_e_ready = 1'h1; // @[MixedNode.scala:542:17, :551:17] wire tlOut_e_ready = 1'h1; // @[MixedNode.scala:542:17, :551:17] wire tlIn_e_ready = 1'h1; // @[MixedNode.scala:542:17, :551:17] wire no_bufferOut_e_ready = 1'h1; // @[MixedNode.scala:542:17, :551:17] wire no_bufferIn_e_ready = 1'h1; // @[MixedNode.scala:542:17, :551:17] wire tlMasterClockXingOut_e_ready = 1'h1; // @[MixedNode.scala:542:17, :551:17] wire tlMasterClockXingIn_e_ready = 1'h1; // @[MixedNode.scala:542:17, :551:17] wire auto_tl_master_clock_xing_in_b_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17, :551:17] wire auto_tl_out_b_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17, :551:17] wire tlOut_b_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17, :551:17] wire tlIn_b_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17, :551:17] wire no_bufferOut_b_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17, :551:17] wire no_bufferIn_b_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17, :551:17] wire tlMasterClockXingOut_b_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17, :551:17] wire tlMasterClockXingIn_b_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17, :551:17] wire [63:0] auto_tl_master_clock_xing_in_b_bits_data = 64'h0; // @[MixedNode.scala:542:17, :551:17] wire [63:0] auto_tl_out_b_bits_data = 64'h0; // @[MixedNode.scala:542:17, :551:17] wire [63:0] tlOut_b_bits_data = 64'h0; // @[MixedNode.scala:542:17, :551:17] wire [63:0] tlIn_b_bits_data = 64'h0; // @[MixedNode.scala:542:17, :551:17] wire [63:0] no_bufferOut_b_bits_data = 64'h0; // @[MixedNode.scala:542:17, :551:17] wire [63:0] no_bufferIn_b_bits_data = 64'h0; // @[MixedNode.scala:542:17, :551:17] wire [63:0] tlMasterClockXingOut_b_bits_data = 64'h0; // @[MixedNode.scala:542:17, :551:17] wire [63:0] tlMasterClockXingIn_b_bits_data = 64'h0; // @[MixedNode.scala:542:17, :551:17] wire [7:0] auto_tl_master_clock_xing_in_b_bits_mask = 8'hFF; // @[MixedNode.scala:542:17, :551:17] wire [7:0] auto_tl_out_b_bits_mask = 8'hFF; // @[MixedNode.scala:542:17, :551:17] wire [7:0] tlOut_b_bits_mask = 8'hFF; // @[MixedNode.scala:542:17, :551:17] wire [7:0] tlIn_b_bits_mask = 8'hFF; // @[MixedNode.scala:542:17, :551:17] wire [7:0] no_bufferOut_b_bits_mask = 8'hFF; // @[MixedNode.scala:542:17, :551:17] wire [7:0] no_bufferIn_b_bits_mask = 8'hFF; // @[MixedNode.scala:542:17, :551:17] wire [7:0] tlMasterClockXingOut_b_bits_mask = 8'hFF; // @[MixedNode.scala:542:17, :551:17] wire [7:0] tlMasterClockXingIn_b_bits_mask = 8'hFF; // @[MixedNode.scala:542:17, :551:17] wire [2:0] auto_tl_master_clock_xing_in_b_bits_source = 3'h0; // @[MixedNode.scala:542:17, :551:17] wire [2:0] no_bufferOut_b_bits_source = 3'h0; // @[MixedNode.scala:542:17, :551:17] wire [2:0] no_bufferIn_b_bits_source = 3'h0; // @[MixedNode.scala:542:17, :551:17] wire [2:0] tlMasterClockXingOut_b_bits_source = 3'h0; // @[MixedNode.scala:542:17, :551:17] wire [2:0] tlMasterClockXingIn_b_bits_source = 3'h0; // @[MixedNode.scala:542:17, :551:17] wire [3:0] auto_tl_master_clock_xing_in_b_bits_size = 4'h6; // @[MixedNode.scala:542:17, :551:17] wire [3:0] auto_tl_out_b_bits_size = 4'h6; // @[MixedNode.scala:542:17, :551:17] wire [3:0] tlOut_b_bits_size = 4'h6; // @[MixedNode.scala:542:17, :551:17] wire [3:0] tlIn_b_bits_size = 4'h6; // @[MixedNode.scala:542:17, :551:17] wire [3:0] no_bufferOut_b_bits_size = 4'h6; // @[MixedNode.scala:542:17, :551:17] wire [3:0] no_bufferIn_b_bits_size = 4'h6; // 
@[MixedNode.scala:542:17, :551:17] wire [3:0] tlMasterClockXingOut_b_bits_size = 4'h6; // @[MixedNode.scala:542:17, :551:17] wire [3:0] tlMasterClockXingIn_b_bits_size = 4'h6; // @[MixedNode.scala:542:17, :551:17] wire [2:0] auto_tl_master_clock_xing_in_b_bits_opcode = 3'h6; // @[MixedNode.scala:542:17, :551:17] wire [2:0] auto_tl_out_b_bits_opcode = 3'h6; // @[MixedNode.scala:542:17, :551:17] wire [2:0] tlOut_b_bits_opcode = 3'h6; // @[MixedNode.scala:542:17, :551:17] wire [2:0] tlIn_b_bits_opcode = 3'h6; // @[MixedNode.scala:542:17, :551:17] wire [2:0] no_bufferOut_b_bits_opcode = 3'h6; // @[MixedNode.scala:542:17, :551:17] wire [2:0] no_bufferIn_b_bits_opcode = 3'h6; // @[MixedNode.scala:542:17, :551:17] wire [2:0] tlMasterClockXingOut_b_bits_opcode = 3'h6; // @[MixedNode.scala:542:17, :551:17] wire tlMasterClockXingIn_a_ready; // @[MixedNode.scala:551:17] wire [2:0] tlMasterClockXingIn_b_bits_opcode = 3'h6; // @[MixedNode.scala:542:17, :551:17] wire tlMasterClockXingIn_a_valid = auto_tl_master_clock_xing_in_a_valid_0; // @[MixedNode.scala:551:17] wire [2:0] tlMasterClockXingIn_a_bits_opcode = auto_tl_master_clock_xing_in_a_bits_opcode_0; // @[MixedNode.scala:551:17] wire [2:0] tlMasterClockXingIn_a_bits_param = auto_tl_master_clock_xing_in_a_bits_param_0; // @[MixedNode.scala:551:17] wire [3:0] tlMasterClockXingIn_a_bits_size = auto_tl_master_clock_xing_in_a_bits_size_0; // @[MixedNode.scala:551:17] wire [2:0] tlMasterClockXingIn_a_bits_source = auto_tl_master_clock_xing_in_a_bits_source_0; // @[MixedNode.scala:551:17] wire [31:0] tlMasterClockXingIn_a_bits_address = auto_tl_master_clock_xing_in_a_bits_address_0; // @[MixedNode.scala:551:17] wire [7:0] tlMasterClockXingIn_a_bits_mask = auto_tl_master_clock_xing_in_a_bits_mask_0; // @[MixedNode.scala:551:17] wire [63:0] tlMasterClockXingIn_a_bits_data = auto_tl_master_clock_xing_in_a_bits_data_0; // @[MixedNode.scala:551:17] wire tlMasterClockXingIn_a_bits_corrupt = auto_tl_master_clock_xing_in_a_bits_corrupt_0; // @[MixedNode.scala:551:17] wire tlMasterClockXingIn_b_ready = auto_tl_master_clock_xing_in_b_ready_0; // @[MixedNode.scala:551:17] wire tlMasterClockXingIn_b_valid; // @[MixedNode.scala:551:17] wire [1:0] tlMasterClockXingIn_b_bits_param; // @[MixedNode.scala:551:17] wire [31:0] tlMasterClockXingIn_b_bits_address; // @[MixedNode.scala:551:17] wire tlMasterClockXingIn_c_ready; // @[MixedNode.scala:551:17] wire tlMasterClockXingIn_c_valid = auto_tl_master_clock_xing_in_c_valid_0; // @[MixedNode.scala:551:17] wire [2:0] tlMasterClockXingIn_c_bits_opcode = auto_tl_master_clock_xing_in_c_bits_opcode_0; // @[MixedNode.scala:551:17] wire [2:0] tlMasterClockXingIn_c_bits_param = auto_tl_master_clock_xing_in_c_bits_param_0; // @[MixedNode.scala:551:17] wire [3:0] tlMasterClockXingIn_c_bits_size = auto_tl_master_clock_xing_in_c_bits_size_0; // @[MixedNode.scala:551:17] wire [2:0] tlMasterClockXingIn_c_bits_source = auto_tl_master_clock_xing_in_c_bits_source_0; // @[MixedNode.scala:551:17] wire [31:0] tlMasterClockXingIn_c_bits_address = auto_tl_master_clock_xing_in_c_bits_address_0; // @[MixedNode.scala:551:17] wire [63:0] tlMasterClockXingIn_c_bits_data = auto_tl_master_clock_xing_in_c_bits_data_0; // @[MixedNode.scala:551:17] wire tlMasterClockXingIn_c_bits_corrupt = auto_tl_master_clock_xing_in_c_bits_corrupt_0; // @[MixedNode.scala:551:17] wire tlMasterClockXingIn_d_ready = auto_tl_master_clock_xing_in_d_ready_0; // @[MixedNode.scala:551:17] wire tlMasterClockXingIn_d_valid; // @[MixedNode.scala:551:17] wire [2:0] 
tlMasterClockXingIn_d_bits_opcode; // @[MixedNode.scala:551:17] wire [1:0] tlMasterClockXingIn_d_bits_param; // @[MixedNode.scala:551:17] wire [3:0] tlMasterClockXingIn_d_bits_size; // @[MixedNode.scala:551:17] wire [2:0] tlMasterClockXingIn_d_bits_source; // @[MixedNode.scala:551:17] wire [2:0] tlMasterClockXingIn_d_bits_sink; // @[MixedNode.scala:551:17] wire tlMasterClockXingIn_d_bits_denied; // @[MixedNode.scala:551:17] wire [63:0] tlMasterClockXingIn_d_bits_data; // @[MixedNode.scala:551:17] wire tlMasterClockXingIn_d_bits_corrupt; // @[MixedNode.scala:551:17] wire tlMasterClockXingIn_e_valid = auto_tl_master_clock_xing_in_e_valid_0; // @[MixedNode.scala:551:17] wire [2:0] tlMasterClockXingIn_e_bits_sink = auto_tl_master_clock_xing_in_e_bits_sink_0; // @[MixedNode.scala:551:17] wire tlOut_a_ready = auto_tl_out_a_ready_0; // @[MixedNode.scala:542:17] wire tlOut_a_valid; // @[MixedNode.scala:542:17] wire [2:0] tlOut_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] tlOut_a_bits_param; // @[MixedNode.scala:542:17] wire [3:0] tlOut_a_bits_size; // @[MixedNode.scala:542:17] wire [4:0] tlOut_a_bits_source; // @[MixedNode.scala:542:17] wire [31:0] tlOut_a_bits_address; // @[MixedNode.scala:542:17] wire [7:0] tlOut_a_bits_mask; // @[MixedNode.scala:542:17] wire [63:0] tlOut_a_bits_data; // @[MixedNode.scala:542:17] wire tlOut_a_bits_corrupt; // @[MixedNode.scala:542:17] wire tlOut_b_ready; // @[MixedNode.scala:542:17] wire tlOut_b_valid = auto_tl_out_b_valid_0; // @[MixedNode.scala:542:17] wire [1:0] tlOut_b_bits_param = auto_tl_out_b_bits_param_0; // @[MixedNode.scala:542:17] wire [31:0] tlOut_b_bits_address = auto_tl_out_b_bits_address_0; // @[MixedNode.scala:542:17] wire tlOut_c_ready = auto_tl_out_c_ready_0; // @[MixedNode.scala:542:17] wire tlOut_c_valid; // @[MixedNode.scala:542:17] wire [2:0] tlOut_c_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] tlOut_c_bits_param; // @[MixedNode.scala:542:17] wire [3:0] tlOut_c_bits_size; // @[MixedNode.scala:542:17] wire [4:0] tlOut_c_bits_source; // @[MixedNode.scala:542:17] wire [31:0] tlOut_c_bits_address; // @[MixedNode.scala:542:17] wire [63:0] tlOut_c_bits_data; // @[MixedNode.scala:542:17] wire tlOut_c_bits_corrupt; // @[MixedNode.scala:542:17] wire tlOut_d_ready; // @[MixedNode.scala:542:17] wire tlOut_d_valid = auto_tl_out_d_valid_0; // @[MixedNode.scala:542:17] wire [2:0] tlOut_d_bits_opcode = auto_tl_out_d_bits_opcode_0; // @[MixedNode.scala:542:17] wire [1:0] tlOut_d_bits_param = auto_tl_out_d_bits_param_0; // @[MixedNode.scala:542:17] wire [3:0] tlOut_d_bits_size = auto_tl_out_d_bits_size_0; // @[MixedNode.scala:542:17] wire [4:0] tlOut_d_bits_source = auto_tl_out_d_bits_source_0; // @[MixedNode.scala:542:17] wire [2:0] tlOut_d_bits_sink = auto_tl_out_d_bits_sink_0; // @[MixedNode.scala:542:17] wire tlOut_d_bits_denied = auto_tl_out_d_bits_denied_0; // @[MixedNode.scala:542:17] wire [63:0] tlOut_d_bits_data = auto_tl_out_d_bits_data_0; // @[MixedNode.scala:542:17] wire tlOut_d_bits_corrupt = auto_tl_out_d_bits_corrupt_0; // @[MixedNode.scala:542:17] wire tlOut_e_valid; // @[MixedNode.scala:542:17] wire [2:0] tlOut_e_bits_sink; // @[MixedNode.scala:542:17] wire auto_tl_master_clock_xing_in_a_ready_0; // @[LazyModuleImp.scala:138:7] wire [1:0] auto_tl_master_clock_xing_in_b_bits_param_0; // @[LazyModuleImp.scala:138:7] wire [31:0] auto_tl_master_clock_xing_in_b_bits_address_0; // @[LazyModuleImp.scala:138:7] wire auto_tl_master_clock_xing_in_b_valid_0; // @[LazyModuleImp.scala:138:7] wire auto_tl_master_clock_xing_in_c_ready_0; 
// @[LazyModuleImp.scala:138:7] wire [2:0] auto_tl_master_clock_xing_in_d_bits_opcode_0; // @[LazyModuleImp.scala:138:7] wire [1:0] auto_tl_master_clock_xing_in_d_bits_param_0; // @[LazyModuleImp.scala:138:7] wire [3:0] auto_tl_master_clock_xing_in_d_bits_size_0; // @[LazyModuleImp.scala:138:7] wire [2:0] auto_tl_master_clock_xing_in_d_bits_source_0; // @[LazyModuleImp.scala:138:7] wire [2:0] auto_tl_master_clock_xing_in_d_bits_sink_0; // @[LazyModuleImp.scala:138:7] wire auto_tl_master_clock_xing_in_d_bits_denied_0; // @[LazyModuleImp.scala:138:7] wire [63:0] auto_tl_master_clock_xing_in_d_bits_data_0; // @[LazyModuleImp.scala:138:7] wire auto_tl_master_clock_xing_in_d_bits_corrupt_0; // @[LazyModuleImp.scala:138:7] wire auto_tl_master_clock_xing_in_d_valid_0; // @[LazyModuleImp.scala:138:7] wire [2:0] auto_tl_out_a_bits_opcode_0; // @[LazyModuleImp.scala:138:7] wire [2:0] auto_tl_out_a_bits_param_0; // @[LazyModuleImp.scala:138:7] wire [3:0] auto_tl_out_a_bits_size_0; // @[LazyModuleImp.scala:138:7] wire [4:0] auto_tl_out_a_bits_source_0; // @[LazyModuleImp.scala:138:7] wire [31:0] auto_tl_out_a_bits_address_0; // @[LazyModuleImp.scala:138:7] wire [7:0] auto_tl_out_a_bits_mask_0; // @[LazyModuleImp.scala:138:7] wire [63:0] auto_tl_out_a_bits_data_0; // @[LazyModuleImp.scala:138:7] wire auto_tl_out_a_bits_corrupt_0; // @[LazyModuleImp.scala:138:7] wire auto_tl_out_a_valid_0; // @[LazyModuleImp.scala:138:7] wire auto_tl_out_b_ready_0; // @[LazyModuleImp.scala:138:7] wire [2:0] auto_tl_out_c_bits_opcode_0; // @[LazyModuleImp.scala:138:7] wire [2:0] auto_tl_out_c_bits_param_0; // @[LazyModuleImp.scala:138:7] wire [3:0] auto_tl_out_c_bits_size_0; // @[LazyModuleImp.scala:138:7] wire [4:0] auto_tl_out_c_bits_source_0; // @[LazyModuleImp.scala:138:7] wire [31:0] auto_tl_out_c_bits_address_0; // @[LazyModuleImp.scala:138:7] wire [63:0] auto_tl_out_c_bits_data_0; // @[LazyModuleImp.scala:138:7] wire auto_tl_out_c_bits_corrupt_0; // @[LazyModuleImp.scala:138:7] wire auto_tl_out_c_valid_0; // @[LazyModuleImp.scala:138:7] wire auto_tl_out_d_ready_0; // @[LazyModuleImp.scala:138:7] wire [2:0] auto_tl_out_e_bits_sink_0; // @[LazyModuleImp.scala:138:7] wire auto_tl_out_e_valid_0; // @[LazyModuleImp.scala:138:7] wire tlIn_a_ready = tlOut_a_ready; // @[MixedNode.scala:542:17, :551:17] wire tlIn_a_valid; // @[MixedNode.scala:551:17] assign auto_tl_out_a_valid_0 = tlOut_a_valid; // @[MixedNode.scala:542:17] wire [2:0] tlIn_a_bits_opcode; // @[MixedNode.scala:551:17] assign auto_tl_out_a_bits_opcode_0 = tlOut_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] tlIn_a_bits_param; // @[MixedNode.scala:551:17] assign auto_tl_out_a_bits_param_0 = tlOut_a_bits_param; // @[MixedNode.scala:542:17] wire [3:0] tlIn_a_bits_size; // @[MixedNode.scala:551:17] assign auto_tl_out_a_bits_size_0 = tlOut_a_bits_size; // @[MixedNode.scala:542:17] wire [4:0] tlIn_a_bits_source; // @[MixedNode.scala:551:17] assign auto_tl_out_a_bits_source_0 = tlOut_a_bits_source; // @[MixedNode.scala:542:17] wire [31:0] tlIn_a_bits_address; // @[MixedNode.scala:551:17] assign auto_tl_out_a_bits_address_0 = tlOut_a_bits_address; // @[MixedNode.scala:542:17] wire [7:0] tlIn_a_bits_mask; // @[MixedNode.scala:551:17] assign auto_tl_out_a_bits_mask_0 = tlOut_a_bits_mask; // @[MixedNode.scala:542:17] wire [63:0] tlIn_a_bits_data; // @[MixedNode.scala:551:17] assign auto_tl_out_a_bits_data_0 = tlOut_a_bits_data; // @[MixedNode.scala:542:17] wire tlIn_a_bits_corrupt; // @[MixedNode.scala:551:17] assign auto_tl_out_a_bits_corrupt_0 = 
tlOut_a_bits_corrupt; // @[MixedNode.scala:542:17] wire tlIn_b_ready; // @[MixedNode.scala:551:17] assign auto_tl_out_b_ready_0 = tlOut_b_ready; // @[MixedNode.scala:542:17] wire tlIn_b_valid = tlOut_b_valid; // @[MixedNode.scala:542:17, :551:17] wire [1:0] tlIn_b_bits_param = tlOut_b_bits_param; // @[MixedNode.scala:542:17, :551:17] wire [31:0] tlIn_b_bits_address = tlOut_b_bits_address; // @[MixedNode.scala:542:17, :551:17] wire tlIn_c_ready = tlOut_c_ready; // @[MixedNode.scala:542:17, :551:17] wire tlIn_c_valid; // @[MixedNode.scala:551:17] assign auto_tl_out_c_valid_0 = tlOut_c_valid; // @[MixedNode.scala:542:17] wire [2:0] tlIn_c_bits_opcode; // @[MixedNode.scala:551:17] assign auto_tl_out_c_bits_opcode_0 = tlOut_c_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] tlIn_c_bits_param; // @[MixedNode.scala:551:17] assign auto_tl_out_c_bits_param_0 = tlOut_c_bits_param; // @[MixedNode.scala:542:17] wire [3:0] tlIn_c_bits_size; // @[MixedNode.scala:551:17] assign auto_tl_out_c_bits_size_0 = tlOut_c_bits_size; // @[MixedNode.scala:542:17] wire [4:0] tlIn_c_bits_source; // @[MixedNode.scala:551:17] assign auto_tl_out_c_bits_source_0 = tlOut_c_bits_source; // @[MixedNode.scala:542:17] wire [31:0] tlIn_c_bits_address; // @[MixedNode.scala:551:17] assign auto_tl_out_c_bits_address_0 = tlOut_c_bits_address; // @[MixedNode.scala:542:17] wire [63:0] tlIn_c_bits_data; // @[MixedNode.scala:551:17] assign auto_tl_out_c_bits_data_0 = tlOut_c_bits_data; // @[MixedNode.scala:542:17] wire tlIn_c_bits_corrupt; // @[MixedNode.scala:551:17] assign auto_tl_out_c_bits_corrupt_0 = tlOut_c_bits_corrupt; // @[MixedNode.scala:542:17] wire tlIn_d_ready; // @[MixedNode.scala:551:17] assign auto_tl_out_d_ready_0 = tlOut_d_ready; // @[MixedNode.scala:542:17] wire tlIn_d_valid = tlOut_d_valid; // @[MixedNode.scala:542:17, :551:17] wire [2:0] tlIn_d_bits_opcode = tlOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17] wire [1:0] tlIn_d_bits_param = tlOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17] wire [3:0] tlIn_d_bits_size = tlOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17] wire [4:0] tlIn_d_bits_source = tlOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17] wire [2:0] tlIn_d_bits_sink = tlOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17] wire tlIn_d_bits_denied = tlOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17] wire [63:0] tlIn_d_bits_data = tlOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17] wire tlIn_d_bits_corrupt = tlOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] wire tlIn_e_valid; // @[MixedNode.scala:551:17] assign auto_tl_out_e_valid_0 = tlOut_e_valid; // @[MixedNode.scala:542:17] wire [2:0] tlIn_e_bits_sink; // @[MixedNode.scala:551:17] assign auto_tl_out_e_bits_sink_0 = tlOut_e_bits_sink; // @[MixedNode.scala:542:17] assign tlOut_a_valid = tlIn_a_valid; // @[MixedNode.scala:542:17, :551:17] assign tlOut_a_bits_opcode = tlIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign tlOut_a_bits_param = tlIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17] assign tlOut_a_bits_size = tlIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17] assign tlOut_a_bits_source = tlIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17] assign tlOut_a_bits_address = tlIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17] assign tlOut_a_bits_mask = tlIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17] assign tlOut_a_bits_data = tlIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17] assign tlOut_a_bits_corrupt = tlIn_a_bits_corrupt; // 
@[MixedNode.scala:542:17, :551:17] assign tlOut_b_ready = tlIn_b_ready; // @[MixedNode.scala:542:17, :551:17] assign tlOut_c_valid = tlIn_c_valid; // @[MixedNode.scala:542:17, :551:17] assign tlOut_c_bits_opcode = tlIn_c_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign tlOut_c_bits_param = tlIn_c_bits_param; // @[MixedNode.scala:542:17, :551:17] assign tlOut_c_bits_size = tlIn_c_bits_size; // @[MixedNode.scala:542:17, :551:17] assign tlOut_c_bits_source = tlIn_c_bits_source; // @[MixedNode.scala:542:17, :551:17] assign tlOut_c_bits_address = tlIn_c_bits_address; // @[MixedNode.scala:542:17, :551:17] assign tlOut_c_bits_data = tlIn_c_bits_data; // @[MixedNode.scala:542:17, :551:17] assign tlOut_c_bits_corrupt = tlIn_c_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] assign tlOut_d_ready = tlIn_d_ready; // @[MixedNode.scala:542:17, :551:17] assign tlOut_e_valid = tlIn_e_valid; // @[MixedNode.scala:542:17, :551:17] assign tlOut_e_bits_sink = tlIn_e_bits_sink; // @[MixedNode.scala:542:17, :551:17] wire no_bufferIn_a_ready = no_bufferOut_a_ready; // @[MixedNode.scala:542:17, :551:17] wire no_bufferIn_a_valid; // @[MixedNode.scala:551:17] wire [2:0] no_bufferIn_a_bits_opcode; // @[MixedNode.scala:551:17] wire [2:0] no_bufferIn_a_bits_param; // @[MixedNode.scala:551:17] wire [3:0] no_bufferIn_a_bits_size; // @[MixedNode.scala:551:17] wire [2:0] no_bufferIn_a_bits_source; // @[MixedNode.scala:551:17] wire [31:0] no_bufferIn_a_bits_address; // @[MixedNode.scala:551:17] wire [7:0] no_bufferIn_a_bits_mask; // @[MixedNode.scala:551:17] wire [63:0] no_bufferIn_a_bits_data; // @[MixedNode.scala:551:17] wire no_bufferIn_a_bits_corrupt; // @[MixedNode.scala:551:17] wire no_bufferIn_b_ready; // @[MixedNode.scala:551:17] wire no_bufferIn_b_valid = no_bufferOut_b_valid; // @[MixedNode.scala:542:17, :551:17] wire [1:0] no_bufferIn_b_bits_param = no_bufferOut_b_bits_param; // @[MixedNode.scala:542:17, :551:17] wire [31:0] no_bufferIn_b_bits_address = no_bufferOut_b_bits_address; // @[MixedNode.scala:542:17, :551:17] wire no_bufferIn_c_ready = no_bufferOut_c_ready; // @[MixedNode.scala:542:17, :551:17] wire no_bufferIn_c_valid; // @[MixedNode.scala:551:17] wire [2:0] no_bufferIn_c_bits_opcode; // @[MixedNode.scala:551:17] wire [2:0] no_bufferIn_c_bits_param; // @[MixedNode.scala:551:17] wire [3:0] no_bufferIn_c_bits_size; // @[MixedNode.scala:551:17] wire [2:0] no_bufferIn_c_bits_source; // @[MixedNode.scala:551:17] wire [31:0] no_bufferIn_c_bits_address; // @[MixedNode.scala:551:17] wire [63:0] no_bufferIn_c_bits_data; // @[MixedNode.scala:551:17] wire no_bufferIn_c_bits_corrupt; // @[MixedNode.scala:551:17] wire no_bufferIn_d_ready; // @[MixedNode.scala:551:17] wire no_bufferIn_d_valid = no_bufferOut_d_valid; // @[MixedNode.scala:542:17, :551:17] wire [2:0] no_bufferIn_d_bits_opcode = no_bufferOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17] wire [1:0] no_bufferIn_d_bits_param = no_bufferOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17] wire [3:0] no_bufferIn_d_bits_size = no_bufferOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17] wire [2:0] no_bufferIn_d_bits_source = no_bufferOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17] wire [2:0] no_bufferIn_d_bits_sink = no_bufferOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17] wire no_bufferIn_d_bits_denied = no_bufferOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17] wire [63:0] no_bufferIn_d_bits_data = no_bufferOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17] wire no_bufferIn_d_bits_corrupt = 
no_bufferOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] wire no_bufferIn_e_valid; // @[MixedNode.scala:551:17] wire [2:0] no_bufferIn_e_bits_sink; // @[MixedNode.scala:551:17] wire [2:0] no_bufferOut_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] no_bufferOut_a_bits_param; // @[MixedNode.scala:542:17] wire [3:0] no_bufferOut_a_bits_size; // @[MixedNode.scala:542:17] wire [2:0] no_bufferOut_a_bits_source; // @[MixedNode.scala:542:17] wire [31:0] no_bufferOut_a_bits_address; // @[MixedNode.scala:542:17] wire [7:0] no_bufferOut_a_bits_mask; // @[MixedNode.scala:542:17] wire [63:0] no_bufferOut_a_bits_data; // @[MixedNode.scala:542:17] wire no_bufferOut_a_bits_corrupt; // @[MixedNode.scala:542:17] wire no_bufferOut_a_valid; // @[MixedNode.scala:542:17] wire no_bufferOut_b_ready; // @[MixedNode.scala:542:17] wire [2:0] no_bufferOut_c_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] no_bufferOut_c_bits_param; // @[MixedNode.scala:542:17] wire [3:0] no_bufferOut_c_bits_size; // @[MixedNode.scala:542:17] wire [2:0] no_bufferOut_c_bits_source; // @[MixedNode.scala:542:17] wire [31:0] no_bufferOut_c_bits_address; // @[MixedNode.scala:542:17] wire [63:0] no_bufferOut_c_bits_data; // @[MixedNode.scala:542:17] wire no_bufferOut_c_bits_corrupt; // @[MixedNode.scala:542:17] wire no_bufferOut_c_valid; // @[MixedNode.scala:542:17] wire no_bufferOut_d_ready; // @[MixedNode.scala:542:17] wire [2:0] no_bufferOut_e_bits_sink; // @[MixedNode.scala:542:17] wire no_bufferOut_e_valid; // @[MixedNode.scala:542:17] wire tlMasterClockXingOut_a_ready = no_bufferIn_a_ready; // @[MixedNode.scala:542:17, :551:17] wire tlMasterClockXingOut_a_valid; // @[MixedNode.scala:542:17] assign no_bufferOut_a_valid = no_bufferIn_a_valid; // @[MixedNode.scala:542:17, :551:17] wire [2:0] tlMasterClockXingOut_a_bits_opcode; // @[MixedNode.scala:542:17] assign no_bufferOut_a_bits_opcode = no_bufferIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17] wire [2:0] tlMasterClockXingOut_a_bits_param; // @[MixedNode.scala:542:17] assign no_bufferOut_a_bits_param = no_bufferIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17] wire [3:0] tlMasterClockXingOut_a_bits_size; // @[MixedNode.scala:542:17] assign no_bufferOut_a_bits_size = no_bufferIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17] wire [2:0] tlMasterClockXingOut_a_bits_source; // @[MixedNode.scala:542:17] assign no_bufferOut_a_bits_source = no_bufferIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17] wire [31:0] tlMasterClockXingOut_a_bits_address; // @[MixedNode.scala:542:17] assign no_bufferOut_a_bits_address = no_bufferIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17] wire [7:0] tlMasterClockXingOut_a_bits_mask; // @[MixedNode.scala:542:17] assign no_bufferOut_a_bits_mask = no_bufferIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17] wire [63:0] tlMasterClockXingOut_a_bits_data; // @[MixedNode.scala:542:17] assign no_bufferOut_a_bits_data = no_bufferIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17] wire tlMasterClockXingOut_a_bits_corrupt; // @[MixedNode.scala:542:17] assign no_bufferOut_a_bits_corrupt = no_bufferIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] wire tlMasterClockXingOut_b_ready; // @[MixedNode.scala:542:17] assign no_bufferOut_b_ready = no_bufferIn_b_ready; // @[MixedNode.scala:542:17, :551:17] wire tlMasterClockXingOut_b_valid = no_bufferIn_b_valid; // @[MixedNode.scala:542:17, :551:17] wire [1:0] tlMasterClockXingOut_b_bits_param = no_bufferIn_b_bits_param; // @[MixedNode.scala:542:17, :551:17] wire [31:0] 
tlMasterClockXingOut_b_bits_address = no_bufferIn_b_bits_address; // @[MixedNode.scala:542:17, :551:17] wire tlMasterClockXingOut_c_ready = no_bufferIn_c_ready; // @[MixedNode.scala:542:17, :551:17] wire tlMasterClockXingOut_c_valid; // @[MixedNode.scala:542:17] assign no_bufferOut_c_valid = no_bufferIn_c_valid; // @[MixedNode.scala:542:17, :551:17] wire [2:0] tlMasterClockXingOut_c_bits_opcode; // @[MixedNode.scala:542:17] assign no_bufferOut_c_bits_opcode = no_bufferIn_c_bits_opcode; // @[MixedNode.scala:542:17, :551:17] wire [2:0] tlMasterClockXingOut_c_bits_param; // @[MixedNode.scala:542:17] assign no_bufferOut_c_bits_param = no_bufferIn_c_bits_param; // @[MixedNode.scala:542:17, :551:17] wire [3:0] tlMasterClockXingOut_c_bits_size; // @[MixedNode.scala:542:17] assign no_bufferOut_c_bits_size = no_bufferIn_c_bits_size; // @[MixedNode.scala:542:17, :551:17] wire [2:0] tlMasterClockXingOut_c_bits_source; // @[MixedNode.scala:542:17] assign no_bufferOut_c_bits_source = no_bufferIn_c_bits_source; // @[MixedNode.scala:542:17, :551:17] wire [31:0] tlMasterClockXingOut_c_bits_address; // @[MixedNode.scala:542:17] assign no_bufferOut_c_bits_address = no_bufferIn_c_bits_address; // @[MixedNode.scala:542:17, :551:17] wire [63:0] tlMasterClockXingOut_c_bits_data; // @[MixedNode.scala:542:17] assign no_bufferOut_c_bits_data = no_bufferIn_c_bits_data; // @[MixedNode.scala:542:17, :551:17] wire tlMasterClockXingOut_c_bits_corrupt; // @[MixedNode.scala:542:17] assign no_bufferOut_c_bits_corrupt = no_bufferIn_c_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] wire tlMasterClockXingOut_d_ready; // @[MixedNode.scala:542:17] assign no_bufferOut_d_ready = no_bufferIn_d_ready; // @[MixedNode.scala:542:17, :551:17] wire tlMasterClockXingOut_d_valid = no_bufferIn_d_valid; // @[MixedNode.scala:542:17, :551:17] wire [2:0] tlMasterClockXingOut_d_bits_opcode = no_bufferIn_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17] wire [1:0] tlMasterClockXingOut_d_bits_param = no_bufferIn_d_bits_param; // @[MixedNode.scala:542:17, :551:17] wire [3:0] tlMasterClockXingOut_d_bits_size = no_bufferIn_d_bits_size; // @[MixedNode.scala:542:17, :551:17] wire [2:0] tlMasterClockXingOut_d_bits_source = no_bufferIn_d_bits_source; // @[MixedNode.scala:542:17, :551:17] wire [2:0] tlMasterClockXingOut_d_bits_sink = no_bufferIn_d_bits_sink; // @[MixedNode.scala:542:17, :551:17] wire tlMasterClockXingOut_d_bits_denied = no_bufferIn_d_bits_denied; // @[MixedNode.scala:542:17, :551:17] wire [63:0] tlMasterClockXingOut_d_bits_data = no_bufferIn_d_bits_data; // @[MixedNode.scala:542:17, :551:17] wire tlMasterClockXingOut_d_bits_corrupt = no_bufferIn_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] wire tlMasterClockXingOut_e_valid; // @[MixedNode.scala:542:17] assign no_bufferOut_e_valid = no_bufferIn_e_valid; // @[MixedNode.scala:542:17, :551:17] wire [2:0] tlMasterClockXingOut_e_bits_sink; // @[MixedNode.scala:542:17] assign no_bufferOut_e_bits_sink = no_bufferIn_e_bits_sink; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingIn_a_ready = tlMasterClockXingOut_a_ready; // @[MixedNode.scala:542:17, :551:17] assign no_bufferIn_a_valid = tlMasterClockXingOut_a_valid; // @[MixedNode.scala:542:17, :551:17] assign no_bufferIn_a_bits_opcode = tlMasterClockXingOut_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign no_bufferIn_a_bits_param = tlMasterClockXingOut_a_bits_param; // @[MixedNode.scala:542:17, :551:17] assign no_bufferIn_a_bits_size = tlMasterClockXingOut_a_bits_size; // @[MixedNode.scala:542:17, :551:17] 
assign no_bufferIn_a_bits_source = tlMasterClockXingOut_a_bits_source; // @[MixedNode.scala:542:17, :551:17] assign no_bufferIn_a_bits_address = tlMasterClockXingOut_a_bits_address; // @[MixedNode.scala:542:17, :551:17] assign no_bufferIn_a_bits_mask = tlMasterClockXingOut_a_bits_mask; // @[MixedNode.scala:542:17, :551:17] assign no_bufferIn_a_bits_data = tlMasterClockXingOut_a_bits_data; // @[MixedNode.scala:542:17, :551:17] assign no_bufferIn_a_bits_corrupt = tlMasterClockXingOut_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] assign no_bufferIn_b_ready = tlMasterClockXingOut_b_ready; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingIn_b_valid = tlMasterClockXingOut_b_valid; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingIn_b_bits_param = tlMasterClockXingOut_b_bits_param; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingIn_b_bits_address = tlMasterClockXingOut_b_bits_address; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingIn_c_ready = tlMasterClockXingOut_c_ready; // @[MixedNode.scala:542:17, :551:17] assign no_bufferIn_c_valid = tlMasterClockXingOut_c_valid; // @[MixedNode.scala:542:17, :551:17] assign no_bufferIn_c_bits_opcode = tlMasterClockXingOut_c_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign no_bufferIn_c_bits_param = tlMasterClockXingOut_c_bits_param; // @[MixedNode.scala:542:17, :551:17] assign no_bufferIn_c_bits_size = tlMasterClockXingOut_c_bits_size; // @[MixedNode.scala:542:17, :551:17] assign no_bufferIn_c_bits_source = tlMasterClockXingOut_c_bits_source; // @[MixedNode.scala:542:17, :551:17] assign no_bufferIn_c_bits_address = tlMasterClockXingOut_c_bits_address; // @[MixedNode.scala:542:17, :551:17] assign no_bufferIn_c_bits_data = tlMasterClockXingOut_c_bits_data; // @[MixedNode.scala:542:17, :551:17] assign no_bufferIn_c_bits_corrupt = tlMasterClockXingOut_c_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] assign no_bufferIn_d_ready = tlMasterClockXingOut_d_ready; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingIn_d_valid = tlMasterClockXingOut_d_valid; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingIn_d_bits_opcode = tlMasterClockXingOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingIn_d_bits_param = tlMasterClockXingOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingIn_d_bits_size = tlMasterClockXingOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingIn_d_bits_source = tlMasterClockXingOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingIn_d_bits_sink = tlMasterClockXingOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingIn_d_bits_denied = tlMasterClockXingOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingIn_d_bits_data = tlMasterClockXingOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingIn_d_bits_corrupt = tlMasterClockXingOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] assign no_bufferIn_e_valid = tlMasterClockXingOut_e_valid; // @[MixedNode.scala:542:17, :551:17] assign no_bufferIn_e_bits_sink = tlMasterClockXingOut_e_bits_sink; // @[MixedNode.scala:542:17, :551:17] assign auto_tl_master_clock_xing_in_a_ready_0 = tlMasterClockXingIn_a_ready; // @[MixedNode.scala:551:17] assign tlMasterClockXingOut_a_valid = tlMasterClockXingIn_a_valid; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_a_bits_opcode = tlMasterClockXingIn_a_bits_opcode; 
// @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_a_bits_param = tlMasterClockXingIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_a_bits_size = tlMasterClockXingIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_a_bits_source = tlMasterClockXingIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_a_bits_address = tlMasterClockXingIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_a_bits_mask = tlMasterClockXingIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_a_bits_data = tlMasterClockXingIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_a_bits_corrupt = tlMasterClockXingIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_b_ready = tlMasterClockXingIn_b_ready; // @[MixedNode.scala:542:17, :551:17] assign auto_tl_master_clock_xing_in_b_valid_0 = tlMasterClockXingIn_b_valid; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_in_b_bits_param_0 = tlMasterClockXingIn_b_bits_param; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_in_b_bits_address_0 = tlMasterClockXingIn_b_bits_address; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_in_c_ready_0 = tlMasterClockXingIn_c_ready; // @[MixedNode.scala:551:17] assign tlMasterClockXingOut_c_valid = tlMasterClockXingIn_c_valid; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_c_bits_opcode = tlMasterClockXingIn_c_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_c_bits_param = tlMasterClockXingIn_c_bits_param; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_c_bits_size = tlMasterClockXingIn_c_bits_size; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_c_bits_source = tlMasterClockXingIn_c_bits_source; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_c_bits_address = tlMasterClockXingIn_c_bits_address; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_c_bits_data = tlMasterClockXingIn_c_bits_data; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_c_bits_corrupt = tlMasterClockXingIn_c_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_d_ready = tlMasterClockXingIn_d_ready; // @[MixedNode.scala:542:17, :551:17] assign auto_tl_master_clock_xing_in_d_valid_0 = tlMasterClockXingIn_d_valid; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_in_d_bits_opcode_0 = tlMasterClockXingIn_d_bits_opcode; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_in_d_bits_param_0 = tlMasterClockXingIn_d_bits_param; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_in_d_bits_size_0 = tlMasterClockXingIn_d_bits_size; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_in_d_bits_source_0 = tlMasterClockXingIn_d_bits_source; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_in_d_bits_sink_0 = tlMasterClockXingIn_d_bits_sink; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_in_d_bits_denied_0 = tlMasterClockXingIn_d_bits_denied; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_in_d_bits_data_0 = tlMasterClockXingIn_d_bits_data; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_in_d_bits_corrupt_0 = tlMasterClockXingIn_d_bits_corrupt; // @[MixedNode.scala:551:17] assign tlMasterClockXingOut_e_valid = tlMasterClockXingIn_e_valid; // 
@[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_e_bits_sink = tlMasterClockXingIn_e_bits_sink; // @[MixedNode.scala:542:17, :551:17] TLPrefetcher prefetcher ( // @[TLPrefetcher.scala:120:32] .clock (clock), .reset (reset), .auto_in_a_ready (no_bufferOut_a_ready), .auto_in_a_valid (no_bufferOut_a_valid), // @[MixedNode.scala:542:17] .auto_in_a_bits_opcode (no_bufferOut_a_bits_opcode), // @[MixedNode.scala:542:17] .auto_in_a_bits_param (no_bufferOut_a_bits_param), // @[MixedNode.scala:542:17] .auto_in_a_bits_size (no_bufferOut_a_bits_size), // @[MixedNode.scala:542:17] .auto_in_a_bits_source (no_bufferOut_a_bits_source), // @[MixedNode.scala:542:17] .auto_in_a_bits_address (no_bufferOut_a_bits_address), // @[MixedNode.scala:542:17] .auto_in_a_bits_mask (no_bufferOut_a_bits_mask), // @[MixedNode.scala:542:17] .auto_in_a_bits_data (no_bufferOut_a_bits_data), // @[MixedNode.scala:542:17] .auto_in_a_bits_corrupt (no_bufferOut_a_bits_corrupt), // @[MixedNode.scala:542:17] .auto_in_b_ready (no_bufferOut_b_ready), // @[MixedNode.scala:542:17] .auto_in_b_valid (no_bufferOut_b_valid), .auto_in_b_bits_param (no_bufferOut_b_bits_param), .auto_in_b_bits_address (no_bufferOut_b_bits_address), .auto_in_c_ready (no_bufferOut_c_ready), .auto_in_c_valid (no_bufferOut_c_valid), // @[MixedNode.scala:542:17] .auto_in_c_bits_opcode (no_bufferOut_c_bits_opcode), // @[MixedNode.scala:542:17] .auto_in_c_bits_param (no_bufferOut_c_bits_param), // @[MixedNode.scala:542:17] .auto_in_c_bits_size (no_bufferOut_c_bits_size), // @[MixedNode.scala:542:17] .auto_in_c_bits_source (no_bufferOut_c_bits_source), // @[MixedNode.scala:542:17] .auto_in_c_bits_address (no_bufferOut_c_bits_address), // @[MixedNode.scala:542:17] .auto_in_c_bits_data (no_bufferOut_c_bits_data), // @[MixedNode.scala:542:17] .auto_in_c_bits_corrupt (no_bufferOut_c_bits_corrupt), // @[MixedNode.scala:542:17] .auto_in_d_ready (no_bufferOut_d_ready), // @[MixedNode.scala:542:17] .auto_in_d_valid (no_bufferOut_d_valid), .auto_in_d_bits_opcode (no_bufferOut_d_bits_opcode), .auto_in_d_bits_param (no_bufferOut_d_bits_param), .auto_in_d_bits_size (no_bufferOut_d_bits_size), .auto_in_d_bits_source (no_bufferOut_d_bits_source), .auto_in_d_bits_sink (no_bufferOut_d_bits_sink), .auto_in_d_bits_denied (no_bufferOut_d_bits_denied), .auto_in_d_bits_data (no_bufferOut_d_bits_data), .auto_in_d_bits_corrupt (no_bufferOut_d_bits_corrupt), .auto_in_e_valid (no_bufferOut_e_valid), // @[MixedNode.scala:542:17] .auto_in_e_bits_sink (no_bufferOut_e_bits_sink), // @[MixedNode.scala:542:17] .auto_out_a_ready (tlIn_a_ready), // @[MixedNode.scala:551:17] .auto_out_a_valid (tlIn_a_valid), .auto_out_a_bits_opcode (tlIn_a_bits_opcode), .auto_out_a_bits_param (tlIn_a_bits_param), .auto_out_a_bits_size (tlIn_a_bits_size), .auto_out_a_bits_source (tlIn_a_bits_source), .auto_out_a_bits_address (tlIn_a_bits_address), .auto_out_a_bits_mask (tlIn_a_bits_mask), .auto_out_a_bits_data (tlIn_a_bits_data), .auto_out_a_bits_corrupt (tlIn_a_bits_corrupt), .auto_out_b_ready (tlIn_b_ready), .auto_out_b_valid (tlIn_b_valid), // @[MixedNode.scala:551:17] .auto_out_b_bits_param (tlIn_b_bits_param), // @[MixedNode.scala:551:17] .auto_out_b_bits_address (tlIn_b_bits_address), // @[MixedNode.scala:551:17] .auto_out_c_ready (tlIn_c_ready), // @[MixedNode.scala:551:17] .auto_out_c_valid (tlIn_c_valid), .auto_out_c_bits_opcode (tlIn_c_bits_opcode), .auto_out_c_bits_param (tlIn_c_bits_param), .auto_out_c_bits_size (tlIn_c_bits_size), .auto_out_c_bits_source (tlIn_c_bits_source), 
.auto_out_c_bits_address (tlIn_c_bits_address), .auto_out_c_bits_data (tlIn_c_bits_data), .auto_out_c_bits_corrupt (tlIn_c_bits_corrupt), .auto_out_d_ready (tlIn_d_ready), .auto_out_d_valid (tlIn_d_valid), // @[MixedNode.scala:551:17] .auto_out_d_bits_opcode (tlIn_d_bits_opcode), // @[MixedNode.scala:551:17] .auto_out_d_bits_param (tlIn_d_bits_param), // @[MixedNode.scala:551:17] .auto_out_d_bits_size (tlIn_d_bits_size), // @[MixedNode.scala:551:17] .auto_out_d_bits_source (tlIn_d_bits_source), // @[MixedNode.scala:551:17] .auto_out_d_bits_sink (tlIn_d_bits_sink), // @[MixedNode.scala:551:17] .auto_out_d_bits_denied (tlIn_d_bits_denied), // @[MixedNode.scala:551:17] .auto_out_d_bits_data (tlIn_d_bits_data), // @[MixedNode.scala:551:17] .auto_out_d_bits_corrupt (tlIn_d_bits_corrupt), // @[MixedNode.scala:551:17] .auto_out_e_valid (tlIn_e_valid), .auto_out_e_bits_sink (tlIn_e_bits_sink) ); // @[TLPrefetcher.scala:120:32] assign auto_tl_master_clock_xing_in_a_ready = auto_tl_master_clock_xing_in_a_ready_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_master_clock_xing_in_b_valid = auto_tl_master_clock_xing_in_b_valid_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_master_clock_xing_in_b_bits_param = auto_tl_master_clock_xing_in_b_bits_param_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_master_clock_xing_in_b_bits_address = auto_tl_master_clock_xing_in_b_bits_address_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_master_clock_xing_in_c_ready = auto_tl_master_clock_xing_in_c_ready_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_master_clock_xing_in_d_valid = auto_tl_master_clock_xing_in_d_valid_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_master_clock_xing_in_d_bits_opcode = auto_tl_master_clock_xing_in_d_bits_opcode_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_master_clock_xing_in_d_bits_param = auto_tl_master_clock_xing_in_d_bits_param_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_master_clock_xing_in_d_bits_size = auto_tl_master_clock_xing_in_d_bits_size_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_master_clock_xing_in_d_bits_source = auto_tl_master_clock_xing_in_d_bits_source_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_master_clock_xing_in_d_bits_sink = auto_tl_master_clock_xing_in_d_bits_sink_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_master_clock_xing_in_d_bits_denied = auto_tl_master_clock_xing_in_d_bits_denied_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_master_clock_xing_in_d_bits_data = auto_tl_master_clock_xing_in_d_bits_data_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_master_clock_xing_in_d_bits_corrupt = auto_tl_master_clock_xing_in_d_bits_corrupt_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_out_a_valid = auto_tl_out_a_valid_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_out_a_bits_opcode = auto_tl_out_a_bits_opcode_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_out_a_bits_param = auto_tl_out_a_bits_param_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_out_a_bits_size = auto_tl_out_a_bits_size_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_out_a_bits_source = auto_tl_out_a_bits_source_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_out_a_bits_address = auto_tl_out_a_bits_address_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_out_a_bits_mask = auto_tl_out_a_bits_mask_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_out_a_bits_data = auto_tl_out_a_bits_data_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_out_a_bits_corrupt = auto_tl_out_a_bits_corrupt_0; // @[LazyModuleImp.scala:138:7] assign 
auto_tl_out_b_ready = auto_tl_out_b_ready_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_out_c_valid = auto_tl_out_c_valid_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_out_c_bits_opcode = auto_tl_out_c_bits_opcode_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_out_c_bits_param = auto_tl_out_c_bits_param_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_out_c_bits_size = auto_tl_out_c_bits_size_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_out_c_bits_source = auto_tl_out_c_bits_source_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_out_c_bits_address = auto_tl_out_c_bits_address_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_out_c_bits_data = auto_tl_out_c_bits_data_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_out_c_bits_corrupt = auto_tl_out_c_bits_corrupt_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_out_d_ready = auto_tl_out_d_ready_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_out_e_valid = auto_tl_out_e_valid_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_out_e_bits_sink = auto_tl_out_e_bits_sink_0; // @[LazyModuleImp.scala:138:7] endmodule
Generate the Verilog code corresponding to the following Chisel files. File PE.scala: // See README.md for license details. package gemmini import chisel3._ import chisel3.util._ class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle { val dataflow = UInt(1.W) // TODO make this an Enum val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)? val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats } class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module { import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(inputType) val in_c = Input(cType) val out_d = Output(dType) }) io.out_d := io.in_c.mac(io.in_a, io.in_b) } // TODO update documentation /** * A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh. * @param width Data width of operands */ class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int) (implicit ev: Arithmetic[T]) extends Module { // Debugging variables import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(outputType) val in_d = Input(outputType) val out_a = Output(inputType) val out_b = Output(outputType) val out_c = Output(outputType) val in_control = Input(new PEControl(accType)) val out_control = Output(new PEControl(accType)) val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W)) val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W)) val in_last = Input(Bool()) val out_last = Output(Bool()) val in_valid = Input(Bool()) val out_valid = Output(Bool()) val bad_dataflow = Output(Bool()) }) val cType = if (df == Dataflow.WS) inputType else accType // When creating PEs that support multiple dataflows, the // elaboration/synthesis tools often fail to consolidate and de-duplicate // MAC units. To force mac circuitry to be re-used, we create a "mac_unit" // module here which just performs a single MAC operation val mac_unit = Module(new MacUnit(inputType, if (df == Dataflow.WS) outputType else accType, outputType)) val a = io.in_a val b = io.in_b val d = io.in_d val c1 = Reg(cType) val c2 = Reg(cType) val dataflow = io.in_control.dataflow val prop = io.in_control.propagate val shift = io.in_control.shift val id = io.in_id val last = io.in_last val valid = io.in_valid io.out_a := a io.out_control.dataflow := dataflow io.out_control.propagate := prop io.out_control.shift := shift io.out_id := id io.out_last := last io.out_valid := valid mac_unit.io.in_a := a val last_s = RegEnable(prop, valid) val flip = last_s =/= prop val shift_offset = Mux(flip, shift, 0.U) // Which dataflow are we using? val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W) val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W) // Is c1 being computed on, or propagated forward (in the output-stationary dataflow)? 
val COMPUTE = 0.U(1.W) val PROPAGATE = 1.U(1.W) io.bad_dataflow := false.B when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 c2 := mac_unit.io.out_d c1 := d.withWidthOf(cType) }.otherwise { io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c1 c1 := mac_unit.io.out_d c2 := d.withWidthOf(cType) } }.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := c1 mac_unit.io.in_b := c2.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c1 := d }.otherwise { io.out_c := c2 mac_unit.io.in_b := c1.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c2 := d } }.otherwise { io.bad_dataflow := true.B //assert(false.B, "unknown dataflow") io.out_c := DontCare io.out_b := DontCare mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 } when (!valid) { c1 := c1 c2 := c2 mac_unit.io.in_b := DontCare mac_unit.io.in_c := DontCare } } File Arithmetic.scala: // A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own: // implicit MyTypeArithmetic extends Arithmetic[MyType] { ... } package gemmini import chisel3._ import chisel3.util._ import hardfloat._ // Bundles that represent the raw bits of custom datatypes case class Float(expWidth: Int, sigWidth: Int) extends Bundle { val bits = UInt((expWidth + sigWidth).W) val bias: Int = (1 << (expWidth-1)) - 1 } case class DummySInt(w: Int) extends Bundle { val bits = UInt(w.W) def dontCare: DummySInt = { val o = Wire(new DummySInt(w)) o.bits := 0.U o } } // The Arithmetic typeclass which implements various arithmetic operations on custom datatypes abstract class Arithmetic[T <: Data] { implicit def cast(t: T): ArithmeticOps[T] } abstract class ArithmeticOps[T <: Data](self: T) { def *(t: T): T def mac(m1: T, m2: T): T // Returns (m1 * m2 + self) def +(t: T): T def -(t: T): T def >>(u: UInt): T // This is a rounding shift! Rounds away from 0 def >(t: T): Bool def identity: T def withWidthOf(t: T): T def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates def relu: T def zero: T def minimum: T // Optional parameters, which only need to be defined if you want to enable various optimizations for transformers def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None def mult_with_reciprocal[U <: Data](reciprocal: U) = self } object Arithmetic { implicit object UIntArithmetic extends Arithmetic[UInt] { override implicit def cast(self: UInt) = new ArithmeticOps(self) { override def *(t: UInt) = self * t override def mac(m1: UInt, m2: UInt) = m1 * m2 + self override def +(t: UInt) = self + t override def -(t: UInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? 
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = point_five & (zeros | ones_digit) (self >> u).asUInt + r } override def >(t: UInt): Bool = self > t override def withWidthOf(t: UInt) = self.asTypeOf(t) override def clippedToWidthOf(t: UInt) = { val sat = ((1 << (t.getWidth-1))-1).U Mux(self > sat, sat, self)(t.getWidth-1, 0) } override def relu: UInt = self override def zero: UInt = 0.U override def identity: UInt = 1.U override def minimum: UInt = 0.U } } implicit object SIntArithmetic extends Arithmetic[SInt] { override implicit def cast(self: SInt) = new ArithmeticOps(self) { override def *(t: SInt) = self * t override def mac(m1: SInt, m2: SInt) = m1 * m2 + self override def +(t: SInt) = self + t override def -(t: SInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = (point_five & (zeros | ones_digit)).asBool (self >> u).asSInt + Mux(r, 1.S, 0.S) } override def >(t: SInt): Bool = self > t override def withWidthOf(t: SInt) = { if (self.getWidth >= t.getWidth) self(t.getWidth-1, 0).asSInt else { val sign_bits = t.getWidth - self.getWidth val sign = self(self.getWidth-1) Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t) } } override def clippedToWidthOf(t: SInt): SInt = { val maxsat = ((1 << (t.getWidth-1))-1).S val minsat = (-(1 << (t.getWidth-1))).S MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt } override def relu: SInt = Mux(self >= 0.S, self, 0.S) override def zero: SInt = 0.S override def identity: SInt = 1.S override def minimum: SInt = (-(1 << (self.getWidth-1))).S override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(denom_t.cloneType)) val output = Wire(Decoupled(self.cloneType)) // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def sin_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def uin_to_float(x: UInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := x in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = 
sin_to_float(self) val denom_rec = uin_to_float(input.bits) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := self_rec divider.io.b := denom_rec divider.io.roundingMode := consts.round_minMag divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := float_to_in(divider.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(self.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) // Instantiate the hardloat sqrt val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0)) input.ready := sqrter.io.inReady sqrter.io.inValid := input.valid sqrter.io.sqrtOp := true.B sqrter.io.a := self_rec sqrter.io.b := DontCare sqrter.io.roundingMode := consts.round_minMag sqrter.io.detectTininess := consts.tininess_afterRounding output.valid := sqrter.io.outValid_sqrt output.bits := float_to_in(sqrter.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match { case Float(expWidth, sigWidth) => val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(u.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } val self_rec = in_to_float(self) val one_rec = in_to_float(1.S) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := one_rec divider.io.b := self_rec divider.io.roundingMode := consts.round_near_even divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u) assert(!output.valid || output.ready) Some((input, output)) case _ => None } override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match 
{ case recip @ Float(expWidth, sigWidth) => def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits) // Instantiate the hardloat divider val muladder = Module(new MulRecFN(expWidth, sigWidth)) muladder.io.roundingMode := consts.round_near_even muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := reciprocal_rec float_to_in(muladder.io.out) case _ => self } } } implicit object FloatArithmetic extends Arithmetic[Float] { // TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) { override def *(t: Float): Float = { val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := t_rec_resized val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def mac(m1: Float, m2: Float): Float = { // Recode all operands val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits) val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize m1 to self's width val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth)) m1_resizer.io.in := m1_rec m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m1_resizer.io.detectTininess := consts.tininess_afterRounding val m1_rec_resized = m1_resizer.io.out // Resize m2 to self's width val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth)) m2_resizer.io.in := m2_rec m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m2_resizer.io.detectTininess := consts.tininess_afterRounding val m2_rec_resized = m2_resizer.io.out // Perform multiply-add val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := m1_rec_resized muladder.io.b := m2_rec_resized muladder.io.c := self_rec // Convert result 
to standard format // TODO remove these intermediate recodings val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def +(t: Float): Float = { require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Generate 1 as a float val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := 1.U in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding val one_rec = in_to_rec_fn.io.out // Resize t val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out // Perform addition val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := t_rec_resized muladder.io.b := one_rec muladder.io.c := self_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def -(t: Float): Float = { val t_sgn = t.bits(t.getWidth-1) val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t) self + neg_t } override def >>(u: UInt): Float = { // Recode self val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Get 2^(-u) as a recoded float val shift_exp = Wire(UInt(self.expWidth.W)) shift_exp := self.bias.U - u val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W)) val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn) assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported") // Multiply self and 2^(-u) val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := shift_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def >(t: Float): Bool = { // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize t to self's width val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth)) comparator.io.a := self_rec comparator.io.b := t_rec_resized comparator.io.signaling := false.B comparator.io.gt } override def withWidthOf(t: Float): Float = { val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even 
// consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def clippedToWidthOf(t: Float): Float = { // TODO check for overflow. Right now, we just assume that overflow doesn't happen val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def relu: Float = { val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits) val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits) result } override def zero: Float = 0.U.asTypeOf(self) override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) } } implicit object DummySIntArithmetic extends Arithmetic[DummySInt] { override implicit def cast(self: DummySInt) = new ArithmeticOps(self) { override def *(t: DummySInt) = self.dontCare override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare override def +(t: DummySInt) = self.dontCare override def -(t: DummySInt) = self.dontCare override def >>(t: UInt) = self.dontCare override def >(t: DummySInt): Bool = false.B override def identity = self.dontCare override def withWidthOf(t: DummySInt) = self.dontCare override def clippedToWidthOf(t: DummySInt) = self.dontCare override def relu = self.dontCare override def zero = self.dontCare override def minimum: DummySInt = self.dontCare } } }
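A note on the rounding right-shift (>>) defined by the ArithmeticOps instances above: the point_five / zeros / ones_digit terms implement the round-to-nearest rule (with ties resolved toward the even result) from the RISC-V vector fixed-point rounding-mode description linked in the comments. The following self-contained Scala sketch is illustrative only and is not part of the Gemmini sources; it models the same computation in plain software, which can be handy for cross-checking the shift-and-round logic in the generated Verilog:

object RoundingShiftModel {
  // Software model of ArithmeticOps.>> : shift right by u and round to nearest,
  // with ties resolved toward the even result (r = point_five & (zeros | ones_digit)).
  def roundingShift(x: BigInt, u: Int): BigInt = {
    if (u == 0) x
    else {
      val pointFive = (x >> (u - 1)) & 1 // bit just below the cut point
      val zeros = if (u <= 1) BigInt(0) else x & ((BigInt(1) << (u - 1)) - 1) // any lower bits set?
      val onesDigit = (x >> u) & 1 // LSB of the truncated result
      val r = if (pointFive == 1 && (zeros != 0 || onesDigit == 1)) BigInt(1) else BigInt(0)
      (x >> u) + r
    }
  }

  def main(args: Array[String]): Unit = {
    println(roundingShift(13, 2)) // 3.25 rounds down to 3
    println(roundingShift(14, 2)) // 3.5 is a tie; the truncated value 3 is odd, so round up to 4
    println(roundingShift(10, 2)) // 2.5 is a tie; the truncated value 2 is even, so stay at 2
  }
}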
module PE_429( // @[PE.scala:31:7] input clock, // @[PE.scala:31:7] input reset, // @[PE.scala:31:7] input [7:0] io_in_a, // @[PE.scala:35:14] input [19:0] io_in_b, // @[PE.scala:35:14] input [19:0] io_in_d, // @[PE.scala:35:14] output [7:0] io_out_a, // @[PE.scala:35:14] output [19:0] io_out_b, // @[PE.scala:35:14] output [19:0] io_out_c, // @[PE.scala:35:14] input io_in_control_dataflow, // @[PE.scala:35:14] input io_in_control_propagate, // @[PE.scala:35:14] input [4:0] io_in_control_shift, // @[PE.scala:35:14] output io_out_control_dataflow, // @[PE.scala:35:14] output io_out_control_propagate, // @[PE.scala:35:14] output [4:0] io_out_control_shift, // @[PE.scala:35:14] input [2:0] io_in_id, // @[PE.scala:35:14] output [2:0] io_out_id, // @[PE.scala:35:14] input io_in_last, // @[PE.scala:35:14] output io_out_last, // @[PE.scala:35:14] input io_in_valid, // @[PE.scala:35:14] output io_out_valid // @[PE.scala:35:14] ); wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:31:7] wire [19:0] io_in_b_0 = io_in_b; // @[PE.scala:31:7] wire [19:0] io_in_d_0 = io_in_d; // @[PE.scala:31:7] wire io_in_control_dataflow_0 = io_in_control_dataflow; // @[PE.scala:31:7] wire io_in_control_propagate_0 = io_in_control_propagate; // @[PE.scala:31:7] wire [4:0] io_in_control_shift_0 = io_in_control_shift; // @[PE.scala:31:7] wire [2:0] io_in_id_0 = io_in_id; // @[PE.scala:31:7] wire io_in_last_0 = io_in_last; // @[PE.scala:31:7] wire io_in_valid_0 = io_in_valid; // @[PE.scala:31:7] wire io_bad_dataflow = 1'h0; // @[PE.scala:31:7] wire _io_out_c_T_5 = 1'h0; // @[Arithmetic.scala:125:33] wire _io_out_c_T_6 = 1'h0; // @[Arithmetic.scala:125:60] wire _io_out_c_T_16 = 1'h0; // @[Arithmetic.scala:125:33] wire _io_out_c_T_17 = 1'h0; // @[Arithmetic.scala:125:60] wire [7:0] io_out_a_0 = io_in_a_0; // @[PE.scala:31:7] wire [19:0] _mac_unit_io_in_b_T = io_in_b_0; // @[PE.scala:31:7, :106:37] wire [19:0] _mac_unit_io_in_b_T_2 = io_in_b_0; // @[PE.scala:31:7, :113:37] wire [19:0] _mac_unit_io_in_b_T_8 = io_in_b_0; // @[PE.scala:31:7, :137:35] wire io_out_control_dataflow_0 = io_in_control_dataflow_0; // @[PE.scala:31:7] wire io_out_control_propagate_0 = io_in_control_propagate_0; // @[PE.scala:31:7] wire [4:0] io_out_control_shift_0 = io_in_control_shift_0; // @[PE.scala:31:7] wire [2:0] io_out_id_0 = io_in_id_0; // @[PE.scala:31:7] wire io_out_last_0 = io_in_last_0; // @[PE.scala:31:7] wire io_out_valid_0 = io_in_valid_0; // @[PE.scala:31:7] wire [19:0] io_out_b_0; // @[PE.scala:31:7] wire [19:0] io_out_c_0; // @[PE.scala:31:7] reg [7:0] c1; // @[PE.scala:70:15] wire [7:0] _io_out_c_zeros_T_1 = c1; // @[PE.scala:70:15] wire [7:0] _mac_unit_io_in_b_T_6 = c1; // @[PE.scala:70:15, :127:38] reg [7:0] c2; // @[PE.scala:71:15] wire [7:0] _io_out_c_zeros_T_10 = c2; // @[PE.scala:71:15] wire [7:0] _mac_unit_io_in_b_T_4 = c2; // @[PE.scala:71:15, :121:38] reg last_s; // @[PE.scala:89:25] wire flip = last_s != io_in_control_propagate_0; // @[PE.scala:31:7, :89:25, :90:21] wire [4:0] shift_offset = flip ? 
io_in_control_shift_0 : 5'h0; // @[PE.scala:31:7, :90:21, :91:25] wire _GEN = shift_offset == 5'h0; // @[PE.scala:91:25] wire _io_out_c_point_five_T; // @[Arithmetic.scala:101:32] assign _io_out_c_point_five_T = _GEN; // @[Arithmetic.scala:101:32] wire _io_out_c_point_five_T_5; // @[Arithmetic.scala:101:32] assign _io_out_c_point_five_T_5 = _GEN; // @[Arithmetic.scala:101:32] wire [5:0] _GEN_0 = {1'h0, shift_offset} - 6'h1; // @[PE.scala:91:25] wire [5:0] _io_out_c_point_five_T_1; // @[Arithmetic.scala:101:53] assign _io_out_c_point_five_T_1 = _GEN_0; // @[Arithmetic.scala:101:53] wire [5:0] _io_out_c_zeros_T_2; // @[Arithmetic.scala:102:66] assign _io_out_c_zeros_T_2 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66] wire [5:0] _io_out_c_point_five_T_6; // @[Arithmetic.scala:101:53] assign _io_out_c_point_five_T_6 = _GEN_0; // @[Arithmetic.scala:101:53] wire [5:0] _io_out_c_zeros_T_11; // @[Arithmetic.scala:102:66] assign _io_out_c_zeros_T_11 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66] wire [4:0] _io_out_c_point_five_T_2 = _io_out_c_point_five_T_1[4:0]; // @[Arithmetic.scala:101:53] wire [7:0] _io_out_c_point_five_T_3 = $signed($signed(c1) >>> _io_out_c_point_five_T_2); // @[PE.scala:70:15] wire _io_out_c_point_five_T_4 = _io_out_c_point_five_T_3[0]; // @[Arithmetic.scala:101:50] wire io_out_c_point_five = ~_io_out_c_point_five_T & _io_out_c_point_five_T_4; // @[Arithmetic.scala:101:{29,32,50}] wire _GEN_1 = shift_offset < 5'h2; // @[PE.scala:91:25] wire _io_out_c_zeros_T; // @[Arithmetic.scala:102:27] assign _io_out_c_zeros_T = _GEN_1; // @[Arithmetic.scala:102:27] wire _io_out_c_zeros_T_9; // @[Arithmetic.scala:102:27] assign _io_out_c_zeros_T_9 = _GEN_1; // @[Arithmetic.scala:102:27] wire [4:0] _io_out_c_zeros_T_3 = _io_out_c_zeros_T_2[4:0]; // @[Arithmetic.scala:102:66] wire [31:0] _io_out_c_zeros_T_4 = 32'h1 << _io_out_c_zeros_T_3; // @[Arithmetic.scala:102:{60,66}] wire [32:0] _io_out_c_zeros_T_5 = {1'h0, _io_out_c_zeros_T_4} - 33'h1; // @[Arithmetic.scala:102:{60,81}] wire [31:0] _io_out_c_zeros_T_6 = _io_out_c_zeros_T_5[31:0]; // @[Arithmetic.scala:102:81] wire [31:0] _io_out_c_zeros_T_7 = {24'h0, _io_out_c_zeros_T_6[7:0] & _io_out_c_zeros_T_1}; // @[Arithmetic.scala:102:{45,52,81}] wire [31:0] _io_out_c_zeros_T_8 = _io_out_c_zeros_T ? 
32'h0 : _io_out_c_zeros_T_7; // @[Arithmetic.scala:102:{24,27,52}] wire io_out_c_zeros = |_io_out_c_zeros_T_8; // @[Arithmetic.scala:102:{24,89}] wire [7:0] _GEN_2 = {3'h0, shift_offset}; // @[PE.scala:91:25] wire [7:0] _GEN_3 = $signed($signed(c1) >>> _GEN_2); // @[PE.scala:70:15] wire [7:0] _io_out_c_ones_digit_T; // @[Arithmetic.scala:103:30] assign _io_out_c_ones_digit_T = _GEN_3; // @[Arithmetic.scala:103:30] wire [7:0] _io_out_c_T; // @[Arithmetic.scala:107:15] assign _io_out_c_T = _GEN_3; // @[Arithmetic.scala:103:30, :107:15] wire io_out_c_ones_digit = _io_out_c_ones_digit_T[0]; // @[Arithmetic.scala:103:30] wire _io_out_c_r_T = io_out_c_zeros | io_out_c_ones_digit; // @[Arithmetic.scala:102:89, :103:30, :105:38] wire _io_out_c_r_T_1 = io_out_c_point_five & _io_out_c_r_T; // @[Arithmetic.scala:101:29, :105:{29,38}] wire io_out_c_r = _io_out_c_r_T_1; // @[Arithmetic.scala:105:{29,53}] wire [1:0] _io_out_c_T_1 = {1'h0, io_out_c_r}; // @[Arithmetic.scala:105:53, :107:33] wire [8:0] _io_out_c_T_2 = {_io_out_c_T[7], _io_out_c_T} + {{7{_io_out_c_T_1[1]}}, _io_out_c_T_1}; // @[Arithmetic.scala:107:{15,28,33}] wire [7:0] _io_out_c_T_3 = _io_out_c_T_2[7:0]; // @[Arithmetic.scala:107:28] wire [7:0] _io_out_c_T_4 = _io_out_c_T_3; // @[Arithmetic.scala:107:28] wire [19:0] _io_out_c_T_7 = {{12{_io_out_c_T_4[7]}}, _io_out_c_T_4}; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_8 = _io_out_c_T_7; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_9 = _io_out_c_T_8; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_10 = _io_out_c_T_9; // @[Arithmetic.scala:125:{81,99}] wire [19:0] _mac_unit_io_in_b_T_1 = _mac_unit_io_in_b_T; // @[PE.scala:106:37] wire [7:0] _mac_unit_io_in_b_WIRE = _mac_unit_io_in_b_T_1[7:0]; // @[PE.scala:106:37] wire [7:0] _c1_T = io_in_d_0[7:0]; // @[PE.scala:31:7] wire [7:0] _c2_T = io_in_d_0[7:0]; // @[PE.scala:31:7] wire [7:0] _c1_T_1 = _c1_T; // @[Arithmetic.scala:114:{15,33}] wire [4:0] _io_out_c_point_five_T_7 = _io_out_c_point_five_T_6[4:0]; // @[Arithmetic.scala:101:53] wire [7:0] _io_out_c_point_five_T_8 = $signed($signed(c2) >>> _io_out_c_point_five_T_7); // @[PE.scala:71:15] wire _io_out_c_point_five_T_9 = _io_out_c_point_five_T_8[0]; // @[Arithmetic.scala:101:50] wire io_out_c_point_five_1 = ~_io_out_c_point_five_T_5 & _io_out_c_point_five_T_9; // @[Arithmetic.scala:101:{29,32,50}] wire [4:0] _io_out_c_zeros_T_12 = _io_out_c_zeros_T_11[4:0]; // @[Arithmetic.scala:102:66] wire [31:0] _io_out_c_zeros_T_13 = 32'h1 << _io_out_c_zeros_T_12; // @[Arithmetic.scala:102:{60,66}] wire [32:0] _io_out_c_zeros_T_14 = {1'h0, _io_out_c_zeros_T_13} - 33'h1; // @[Arithmetic.scala:102:{60,81}] wire [31:0] _io_out_c_zeros_T_15 = _io_out_c_zeros_T_14[31:0]; // @[Arithmetic.scala:102:81] wire [31:0] _io_out_c_zeros_T_16 = {24'h0, _io_out_c_zeros_T_15[7:0] & _io_out_c_zeros_T_10}; // @[Arithmetic.scala:102:{45,52,81}] wire [31:0] _io_out_c_zeros_T_17 = _io_out_c_zeros_T_9 ? 
32'h0 : _io_out_c_zeros_T_16; // @[Arithmetic.scala:102:{24,27,52}] wire io_out_c_zeros_1 = |_io_out_c_zeros_T_17; // @[Arithmetic.scala:102:{24,89}] wire [7:0] _GEN_4 = $signed($signed(c2) >>> _GEN_2); // @[PE.scala:71:15] wire [7:0] _io_out_c_ones_digit_T_1; // @[Arithmetic.scala:103:30] assign _io_out_c_ones_digit_T_1 = _GEN_4; // @[Arithmetic.scala:103:30] wire [7:0] _io_out_c_T_11; // @[Arithmetic.scala:107:15] assign _io_out_c_T_11 = _GEN_4; // @[Arithmetic.scala:103:30, :107:15] wire io_out_c_ones_digit_1 = _io_out_c_ones_digit_T_1[0]; // @[Arithmetic.scala:103:30] wire _io_out_c_r_T_2 = io_out_c_zeros_1 | io_out_c_ones_digit_1; // @[Arithmetic.scala:102:89, :103:30, :105:38] wire _io_out_c_r_T_3 = io_out_c_point_five_1 & _io_out_c_r_T_2; // @[Arithmetic.scala:101:29, :105:{29,38}] wire io_out_c_r_1 = _io_out_c_r_T_3; // @[Arithmetic.scala:105:{29,53}] wire [1:0] _io_out_c_T_12 = {1'h0, io_out_c_r_1}; // @[Arithmetic.scala:105:53, :107:33] wire [8:0] _io_out_c_T_13 = {_io_out_c_T_11[7], _io_out_c_T_11} + {{7{_io_out_c_T_12[1]}}, _io_out_c_T_12}; // @[Arithmetic.scala:107:{15,28,33}] wire [7:0] _io_out_c_T_14 = _io_out_c_T_13[7:0]; // @[Arithmetic.scala:107:28] wire [7:0] _io_out_c_T_15 = _io_out_c_T_14; // @[Arithmetic.scala:107:28] wire [19:0] _io_out_c_T_18 = {{12{_io_out_c_T_15[7]}}, _io_out_c_T_15}; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_19 = _io_out_c_T_18; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_20 = _io_out_c_T_19; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_21 = _io_out_c_T_20; // @[Arithmetic.scala:125:{81,99}] wire [19:0] _mac_unit_io_in_b_T_3 = _mac_unit_io_in_b_T_2; // @[PE.scala:113:37] wire [7:0] _mac_unit_io_in_b_WIRE_1 = _mac_unit_io_in_b_T_3[7:0]; // @[PE.scala:113:37] wire [7:0] _c2_T_1 = _c2_T; // @[Arithmetic.scala:114:{15,33}] wire [7:0] _mac_unit_io_in_b_T_5; // @[PE.scala:121:38] assign _mac_unit_io_in_b_T_5 = _mac_unit_io_in_b_T_4; // @[PE.scala:121:38] wire [7:0] _mac_unit_io_in_b_WIRE_2 = _mac_unit_io_in_b_T_5; // @[PE.scala:121:38] assign io_out_c_0 = io_in_control_propagate_0 ? {{12{c1[7]}}, c1} : {{12{c2[7]}}, c2}; // @[PE.scala:31:7, :70:15, :71:15, :119:30, :120:16, :126:16] wire [7:0] _mac_unit_io_in_b_T_7; // @[PE.scala:127:38] assign _mac_unit_io_in_b_T_7 = _mac_unit_io_in_b_T_6; // @[PE.scala:127:38] wire [7:0] _mac_unit_io_in_b_WIRE_3 = _mac_unit_io_in_b_T_7; // @[PE.scala:127:38] wire [19:0] _mac_unit_io_in_b_T_9 = _mac_unit_io_in_b_T_8; // @[PE.scala:137:35] wire [7:0] _mac_unit_io_in_b_WIRE_4 = _mac_unit_io_in_b_T_9[7:0]; // @[PE.scala:137:35] always @(posedge clock) begin // @[PE.scala:31:7] if (io_in_valid_0 & io_in_control_propagate_0) // @[PE.scala:31:7, :102:95, :141:17, :142:8] c1 <= io_in_d_0[7:0]; // @[PE.scala:31:7, :70:15] if (~(~io_in_valid_0 | io_in_control_propagate_0)) // @[PE.scala:31:7, :71:15, :102:95, :119:30, :130:10, :141:{9,17}, :143:8] c2 <= io_in_d_0[7:0]; // @[PE.scala:31:7, :71:15] if (io_in_valid_0) // @[PE.scala:31:7] last_s <= io_in_control_propagate_0; // @[PE.scala:31:7, :89:25] always @(posedge) MacUnit_173 mac_unit ( // @[PE.scala:64:24] .clock (clock), .reset (reset), .io_in_a (io_in_a_0), // @[PE.scala:31:7] .io_in_b (io_in_control_propagate_0 ? 
_mac_unit_io_in_b_WIRE_2 : _mac_unit_io_in_b_WIRE_3), // @[PE.scala:31:7, :119:30, :121:{24,38}, :127:{24,38}] .io_in_c (io_in_b_0), // @[PE.scala:31:7] .io_out_d (io_out_b_0) ); // @[PE.scala:64:24] assign io_out_a = io_out_a_0; // @[PE.scala:31:7] assign io_out_b = io_out_b_0; // @[PE.scala:31:7] assign io_out_c = io_out_c_0; // @[PE.scala:31:7] assign io_out_control_dataflow = io_out_control_dataflow_0; // @[PE.scala:31:7] assign io_out_control_propagate = io_out_control_propagate_0; // @[PE.scala:31:7] assign io_out_control_shift = io_out_control_shift_0; // @[PE.scala:31:7] assign io_out_id = io_out_id_0; // @[PE.scala:31:7] assign io_out_last = io_out_last_0; // @[PE.scala:31:7] assign io_out_valid = io_out_valid_0; // @[PE.scala:31:7] endmodule
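Judging by its port widths, PE_429 above is a weight-stationary elaboration of PE: cType equals inputType, so c1/c2 are 8-bit registers, the single shared MacUnit_173 instance reflects the MAC de-duplication note in PE.scala, and the output-stationary rounding path survives only as dead wires. The driver below is a hypothetical sketch whose parameters are inferred from those port widths rather than taken from a real Gemmini configuration, and the exact ChiselStage entry point depends on the Chisel version in use:

import chisel3._
import chisel3.stage.ChiselStage
import gemmini._

object ElaboratePE extends App {
  // SInt(8.W) activations, SInt(20.W) partial sums, a 32-bit accumulator type
  // (which yields the 5-bit shift field), weight-stationary dataflow, and a
  // 3-bit id (max_simultaneous_matmuls = 8) -- all inferred from PE_429's ports.
  (new ChiselStage).emitVerilog(
    new PE(SInt(8.W), SInt(20.W), SInt(32.W), Dataflow.WS, max_simultaneous_matmuls = 8)
  )
}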
Generate the Verilog code corresponding to the following Chisel files. File Buffer.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.diplomacy.BufferParams class TLBufferNode ( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams)(implicit valName: ValName) extends TLAdapterNode( clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) }, managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) } ) { override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}" override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none) } class TLBuffer( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams)(implicit p: Parameters) extends LazyModule { def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace) def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde) def this()(implicit p: Parameters) = this(BufferParams.default) val node = new TLBufferNode(a, b, c, d, e) lazy val module = new Impl class Impl extends LazyModuleImp(this) { def headBundle = node.out.head._2.bundle override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_") (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out.a <> a(in .a) in .d <> d(out.d) if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) { in .b <> b(out.b) out.c <> c(in .c) out.e <> e(in .e) } else { in.b.valid := false.B in.c.ready := true.B in.e.ready := true.B out.b.ready := true.B out.c.valid := false.B out.e.valid := false.B } } } } object TLBuffer { def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default) def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde) def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace) def apply( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams)(implicit p: Parameters): TLNode = { val buffer = LazyModule(new TLBuffer(a, b, c, d, e)) buffer.node } def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = { val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) } name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } } buffers.map(_.node) } def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = { chain(depth, name) .reduceLeftOption(_ :*=* _) .getOrElse(TLNameNode("no_buffer")) } } File Nodes.scala: // See LICENSE.SiFive for license details. 
package freechips.rocketchip.tilelink import chisel3._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.nodes._ import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection} case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args)) object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle] { def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo) def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo) def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle) def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle) def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString) override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = { val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge))) monitor.io.in := bundle } override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters = pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }) override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters = pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }) } trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut] case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode case class TLAdapterNode( clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s }, managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLJunctionNode( clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters], managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])( implicit valName: ValName) extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode object TLNameNode { def apply(name: ValName) = TLIdentityNode()(name) def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLIdentityNode = apply(Some(name)) } case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)() object TLTempNode { def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp")) } case class TLNexusNode( clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters, managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)( implicit valName: ValName) extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode abstract class TLCustomNode(implicit valName: ValName) extends CustomNode(TLImp) with TLFormatNode // Asynchronous crossings trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters] object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, 
TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle] { def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle) def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString) override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLAsyncAdapterNode( clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s }, managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode object TLAsyncNameNode { def apply(name: ValName) = TLAsyncIdentityNode()(name) def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLAsyncIdentityNode = apply(Some(name)) } case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLAsyncImp)( dFn = { p => TLAsyncClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName) extends MixedAdapterNode(TLAsyncImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) }, uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut] // Rationally related crossings trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters] object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle] { def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle) def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */) override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case 
class TLRationalAdapterNode( clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s }, managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode object TLRationalNameNode { def apply(name: ValName) = TLRationalIdentityNode()(name) def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLRationalIdentityNode = apply(Some(name)) } case class TLRationalSourceNode()(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLRationalImp)( dFn = { p => TLRationalClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName) extends MixedAdapterNode(TLRationalImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut] // Credited version of TileLink channels trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters] object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle] { def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle) def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString) override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLCreditedAdapterNode( clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s }, managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode object TLCreditedNameNode { def apply(name: ValName) = TLCreditedIdentityNode()(name) def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLCreditedIdentityNode = apply(Some(name)) } case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLCreditedImp)( dFn = { p => TLCreditedClientPortParameters(delay, p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] 
// discard cycles from other clock domain case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLCreditedImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut] File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. 
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. */ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File MixedNode.scala: package org.chipsalliance.diplomacy.nodes import chisel3.{Data, DontCare, Wire} import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.{Field, Parameters} import org.chipsalliance.diplomacy.ValName import org.chipsalliance.diplomacy.sourceLine /** One side metadata of a [[Dangle]]. 
* * Describes one side of an edge going into or out of a [[BaseNode]]. * * @param serial * the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to. * @param index * the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to. */ case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] { import scala.math.Ordered.orderingToOrdered def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that)) } /** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]] * connects. * * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] , * [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]]. * * @param source * the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within * that [[BaseNode]]. * @param sink * sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that * [[BaseNode]]. * @param flipped * flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to * `danglesIn`. * @param dataOpt * actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module */ case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) { def data = dataOpt.get } /** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often * derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually * implement the protocol. */ case class Edges[EI, EO](in: Seq[EI], out: Seq[EO]) /** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */ case object MonitorsEnabled extends Field[Boolean](true) /** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented. * * For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but * [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink * nodes, flipping the rendering of one node's edge will usual produce a more concise visual layout for the * [[LazyModule]]. */ case object RenderFlipped extends Field[Boolean](false) /** The sealed node class in the package, all node are derived from it. * * @param inner * Sink interface implementation. * @param outer * Source interface implementation. * @param valName * val name of this node. * @tparam DI * Downward-flowing parameters received on the inner side of the node. It is usually a brunch of parameters * describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected * [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port * parameters. * @tparam UI * Upward-flowing parameters generated by the inner side of the node. It is usually a brunch of parameters describing * the protocol parameters of a sink. For an [[InwardNode]], it is determined itself. * @tparam EI * Edge Parameters describing a connection on the inner side of the node. It is usually a brunch of transfers * specified for a sink according to protocol. 
* @tparam BI * Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface. * It should extends from [[chisel3.Data]], which represents the real hardware. * @tparam DO * Downward-flowing parameters generated on the outer side of the node. It is usually a brunch of parameters * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined itself. * @tparam UO * Upward-flowing parameters received by the outer side of the node. It is usually a brunch of parameters describing * the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]]. * Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters. * @tparam EO * Edge Parameters describing a connection on the outer side of the node. It is usually a brunch of transfers * specified for a source according to protocol. * @tparam BO * Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source * interface. It should extends from [[chisel3.Data]], which represents the real hardware. * * @note * Call Graph of [[MixedNode]] * - line `─`: source is process by a function and generate pass to others * - Arrow `→`: target of arrow is generated by source * * {{{ * (from the other node) * ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐ * ↓ │ * (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │ * [[InwardNode.accPI]] │ │ │ * │ │ (based on protocol) │ * │ │ [[MixedNode.inner.edgeI]] │ * │ │ ↓ │ * ↓ │ │ │ * (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │ * [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │ * │ │ ↑ │ │ │ * │ │ │ [[OutwardNode.doParams]] │ │ * │ │ │ (from the other node) │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * │ │ │ └────────┬──────────────┤ │ * │ │ │ │ │ │ * │ │ │ │ (based on protocol) │ * │ │ │ │ [[MixedNode.inner.edgeI]] │ * │ │ │ │ │ │ * │ │ (from the other node) │ ↓ │ * │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │ * │ ↑ ↑ │ │ ↓ │ * │ │ │ │ │ [[MixedNode.in]] │ * │ │ │ │ ↓ ↑ │ * │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │ * ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │ * │ │ │ [[MixedNode.bundleOut]]─┐ │ │ * │ │ │ ↑ ↓ │ │ * │ │ │ │ [[MixedNode.out]] │ │ * │ ↓ ↓ │ ↑ │ │ * │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │ * │ │ (from the other node) ↑ │ │ * │ │ │ │ │ │ * │ │ │ [[MixedNode.outer.edgeO]] │ │ * │ │ │ (based on protocol) │ │ * │ │ │ │ │ │ * │ │ │ ┌────────────────────────────────────────┤ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * (immobilize after elaboration)│ ↓ │ │ │ │ * [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │ * ↑ (inward port from [[OutwardNode]]) │ │ │ │ * │ ┌─────────────────────────────────────────┤ │ │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * [[OutwardNode.accPO]] │ ↓ │ │ │ * (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │ * │ ↑ │ │ * │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │ * 
└──────────────────────────────────────────────────────────────────────────────────────────────────────────┘ * }}} */ abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data]( val inner: InwardNodeImp[DI, UI, EI, BI], val outer: OutwardNodeImp[DO, UO, EO, BO] )( implicit valName: ValName) extends BaseNode with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO] with InwardNode[DI, UI, BI] with OutwardNode[DO, UO, BO] { // Generate a [[NodeHandle]] with inward and outward node are both this node. val inward = this val outward = this /** Debug info of nodes binding. */ def bindingInfo: String = s"""$iBindingInfo |$oBindingInfo |""".stripMargin /** Debug info of ports connecting. */ def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}] |${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}] |""".stripMargin /** Debug info of parameters propagations. */ def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}] |${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}] |${diParams.size} downstream inward parameters: [${diParams.mkString(",")}] |${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}] |""".stripMargin /** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and * [[MixedNode.iPortMapping]]. * * Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward * stars and outward stars. * * This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type * of node. * * @param iKnown * Number of known-size ([[BIND_ONCE]]) input bindings. * @param oKnown * Number of known-size ([[BIND_ONCE]]) output bindings. * @param iStar * Number of unknown size ([[BIND_STAR]]) input bindings. * @param oStar * Number of unknown size ([[BIND_STAR]]) output bindings. * @return * A Tuple of the resolved number of input and output connections. */ protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) /** Function to generate downward-flowing outward params from the downward-flowing input params and the current output * ports. * * @param n * The size of the output sequence to generate. * @param p * Sequence of downward-flowing input parameters of this node. * @return * A `n`-sized sequence of downward-flowing output edge parameters. */ protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]]. * * @param n * Size of the output sequence. * @param p * Upward-flowing output edge parameters. * @return * A n-sized sequence of upward-flowing input edge parameters. */ protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] /** @return * The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with * [[BIND_STAR]]. */ protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR) /** @return * The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of * output bindings bound with [[BIND_STAR]]. 
*/ protected[diplomacy] lazy val sourceCard: Int = iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR) /** @return list of nodes involved in flex bindings with this node. */ protected[diplomacy] lazy val flexes: Seq[BaseNode] = oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2) /** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin * greedily taking up the remaining connections. * * @return * A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return * value is not relevant. */ protected[diplomacy] lazy val flexOffset: Int = { /** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex * operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a * connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of * each node in the current set and decide whether they should be added to the set or not. * * @return * the mapping of [[BaseNode]] indexed by their serial numbers. */ def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = { if (visited.contains(v.serial) || !v.flexibleArityDirection) { visited } else { v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum)) } } /** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node. * * @example * {{{ * a :*=* b :*=* c * d :*=* b * e :*=* f * }}} * * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)` */ val flexSet = DFS(this, Map()).values /** The total number of :*= operators where we're on the left. */ val allSink = flexSet.map(_.sinkCard).sum /** The total number of :=* operators used when we're on the right. */ val allSource = flexSet.map(_.sourceCard).sum require( allSink == 0 || allSource == 0, s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction." ) allSink - allSource } /** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */ protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = { if (flexibleArityDirection) flexOffset else if (n.flexibleArityDirection) n.flexOffset else 0 } /** For a node which is connected between two nodes, select the one that will influence the direction of the flex * resolution. */ protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = { val dir = edgeArityDirection(n) if (dir < 0) l else if (dir > 0) r else 1 } /** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */ private var starCycleGuard = false /** Resolve all the star operators into concrete indicies. As connections are being made, some may be "star" * connections which need to be resolved. In some way to determine how many actual edges they correspond to. We also * need to build up the ranges of edges which correspond to each binding operator, so that We can apply the correct * edge parameters and later build up correct bundle connections. * * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding * operator). 
[[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort * (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= * bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N` */ protected[diplomacy] lazy val ( oPortMapping: Seq[(Int, Int)], iPortMapping: Seq[(Int, Int)], oStar: Int, iStar: Int ) = { try { if (starCycleGuard) throw StarCycleException() starCycleGuard = true // For a given node N... // Number of foo :=* N // + Number of bar :=* foo :*=* N val oStars = oBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0) } // Number of N :*= foo // + Number of N :*=* foo :*= bar val iStars = iBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0) } // 1 for foo := N // + bar.iStar for bar :*= foo :*=* N // + foo.iStar for foo :*= N // + 0 for foo :=* N val oKnown = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, 0, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => 0 } }.sum // 1 for N := foo // + bar.oStar for N :*=* foo :=* bar // + foo.oStar for N :=* foo // + 0 for N :*= foo val iKnown = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, 0) case BIND_QUERY => n.oStar case BIND_STAR => 0 } }.sum // Resolve star depends on the node subclass to implement the algorithm for this. val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars) // Cumulative list of resolved outward binding range starting points val oSum = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => oStar } }.scanLeft(0)(_ + _) // Cumulative list of resolved inward binding range starting points val iSum = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar) case BIND_QUERY => n.oStar case BIND_STAR => iStar } }.scanLeft(0)(_ + _) // Create ranges for each binding based on the running sums and return // those along with resolved values for the star operations. (oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar) } catch { case c: StarCycleException => throw c.copy(loop = context +: c.loop) } } /** Sequence of inward ports. * * This should be called after all star bindings are resolved. * * Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. * `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this * connection was made in the source code. */ protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oBindings.flatMap { case (i, n, _, p, s) => // for each binding operator in this node, look at what it connects to val (start, end) = n.iPortMapping(i) (start until end).map { j => (j, n, p, s) } } /** Sequence of outward ports. * * This should be called after all star bindings are resolved. * * `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of * outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection * was made in the source code. 
*/ protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iBindings.flatMap { case (i, n, _, p, s) => // query this port index range of this node in the other side of node. val (start, end) = n.oPortMapping(i) (start until end).map { j => (j, n, p, s) } } // Ephemeral nodes ( which have non-None iForward/oForward) have in_degree = out_degree // Thus, there must exist an Eulerian path and the below algorithms terminate @scala.annotation.tailrec private def oTrace( tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) ): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.iForward(i) match { case None => (i, n, p, s) case Some((j, m)) => oTrace((j, m, p, s)) } } @scala.annotation.tailrec private def iTrace( tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) ): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.oForward(i) match { case None => (i, n, p, s) case Some((j, m)) => iTrace((j, m, p, s)) } } /** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - Numeric index of this binding in the [[InwardNode]] on the other end. * - [[InwardNode]] on the other end of this binding. * - A view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace) /** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - numeric index of this binding in [[OutwardNode]] on the other end. * - [[OutwardNode]] on the other end of this binding. * - a view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace) private var oParamsCycleGuard = false protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) } protected[diplomacy] lazy val doParams: Seq[DO] = { try { if (oParamsCycleGuard) throw DownwardCycleException() oParamsCycleGuard = true val o = mapParamsD(oPorts.size, diParams) require( o.size == oPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of outward ports should equal the number of produced outward parameters. |$context |$connectedPortsInfo |Downstreamed inward parameters: [${diParams.mkString(",")}] |Produced outward parameters: [${o.mkString(",")}] |""".stripMargin ) o.map(outer.mixO(_, this)) } catch { case c: DownwardCycleException => throw c.copy(loop = context +: c.loop) } } private var iParamsCycleGuard = false protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) } protected[diplomacy] lazy val uiParams: Seq[UI] = { try { if (iParamsCycleGuard) throw UpwardCycleException() iParamsCycleGuard = true val i = mapParamsU(iPorts.size, uoParams) require( i.size == iPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of inward ports should equal the number of produced inward parameters. 
|$context |$connectedPortsInfo |Upstreamed outward parameters: [${uoParams.mkString(",")}] |Produced inward parameters: [${i.mkString(",")}] |""".stripMargin ) i.map(inner.mixI(_, this)) } catch { case c: UpwardCycleException => throw c.copy(loop = context +: c.loop) } } /** Outward edge parameters. */ protected[diplomacy] lazy val edgesOut: Seq[EO] = (oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) } /** Inward edge parameters. */ protected[diplomacy] lazy val edgesIn: Seq[EI] = (iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) } /** A tuple of the input edge parameters and output edge parameters for the edges bound to this node. * * If you need to access to the edges of a foreign Node, use this method (in/out create bundles). */ lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut) /** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */ protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e => val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } /** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */ protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e => val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(serial, i), sink = HalfEdge(n.serial, j), flipped = false, name = wirePrefix + "out", dataOpt = None ) } private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(n.serial, j), sink = HalfEdge(serial, i), flipped = true, name = wirePrefix + "in", dataOpt = None ) } /** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */ protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleOut(i))) } /** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */ protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleIn(i))) } private[diplomacy] var instantiated = false /** Gather Bundle and edge parameters of outward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def out: Seq[(BO, EO)] = { require( instantiated, s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleOut.zip(edgesOut) } /** Gather Bundle and edge parameters of inward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. 
*/ def in: Seq[(BI, EI)] = { require( instantiated, s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleIn.zip(edgesIn) } /** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires, * instantiate monitors on all input ports if appropriate, and return all the dangles of this node. */ protected[diplomacy] def instantiate(): Seq[Dangle] = { instantiated = true if (!circuitIdentity) { (iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) } } danglesOut ++ danglesIn } protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn /** Connects the outward part of a node with the inward part of this node. */ protected[diplomacy] def bind( h: OutwardNode[DI, UI, BI], binding: NodeBinding )( implicit p: Parameters, sourceInfo: SourceInfo ): Unit = { val x = this // x := y val y = h sourceLine(sourceInfo, " at ", "") val i = x.iPushed val o = y.oPushed y.oPush( i, x, binding match { case BIND_ONCE => BIND_ONCE case BIND_FLEX => BIND_FLEX case BIND_STAR => BIND_QUERY case BIND_QUERY => BIND_STAR } ) x.iPush(o, y, binding) } /* Metadata for printing the node graph. */ def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) => val re = inner.render(e) (n, re.copy(flipped = re.flipped != p(RenderFlipped))) } /** Metadata for printing the node graph */ def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) } }
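The oPortMapping/iPortMapping computation in MixedNode above reduces to a cumulative sum over per-binding edge counts, turned into half-open ranges. The sketch below restates only that arithmetic in plain Scala with made-up counts; it does not depend on rocket-chip and the Seq values are illustrative, not taken from a real node.

// A minimal plain-Scala sketch of the port-range arithmetic used by
// MixedNode.oPortMapping / iPortMapping: per-binding edge counts become
// (start, end) ranges via scanLeft, mirroring oSum.init.zip(oSum.tail) above.
// The edge counts here are made-up example values.
object PortMappingSketch extends App {
  val edgeCounts  = Seq(1, 3, 2)                  // edges resolved for each binding operator
  val sums        = edgeCounts.scanLeft(0)(_ + _) // Seq(0, 1, 4, 6)
  val portMapping = sums.init.zip(sums.tail)      // Seq((0,1), (1,4), (4,6))
  assert(portMapping == Seq((0, 1), (1, 4), (4, 6)))
  println(portMapping)
}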
module TLBuffer_a28d64s5k1z3u_1( // @[Buffer.scala:40:9] input clock, // @[Buffer.scala:40:9] input reset, // @[Buffer.scala:40:9] output auto_in_a_ready, // @[LazyModuleImp.scala:107:25] input auto_in_a_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_a_bits_param, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_a_bits_size, // @[LazyModuleImp.scala:107:25] input [4:0] auto_in_a_bits_source, // @[LazyModuleImp.scala:107:25] input [27:0] auto_in_a_bits_address, // @[LazyModuleImp.scala:107:25] input [7:0] auto_in_a_bits_mask, // @[LazyModuleImp.scala:107:25] input [63:0] auto_in_a_bits_data, // @[LazyModuleImp.scala:107:25] input auto_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_in_d_ready, // @[LazyModuleImp.scala:107:25] output auto_in_d_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25] output [1:0] auto_in_d_bits_param, // @[LazyModuleImp.scala:107:25] output [2:0] auto_in_d_bits_size, // @[LazyModuleImp.scala:107:25] output [4:0] auto_in_d_bits_source, // @[LazyModuleImp.scala:107:25] output auto_in_d_bits_sink, // @[LazyModuleImp.scala:107:25] output auto_in_d_bits_denied, // @[LazyModuleImp.scala:107:25] output [63:0] auto_in_d_bits_data, // @[LazyModuleImp.scala:107:25] output auto_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_out_a_ready, // @[LazyModuleImp.scala:107:25] output auto_out_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_a_bits_param, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_a_bits_size, // @[LazyModuleImp.scala:107:25] output [4:0] auto_out_a_bits_source, // @[LazyModuleImp.scala:107:25] output [27:0] auto_out_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_out_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_out_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_out_d_ready, // @[LazyModuleImp.scala:107:25] input auto_out_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_out_d_bits_size, // @[LazyModuleImp.scala:107:25] input [4:0] auto_out_d_bits_source, // @[LazyModuleImp.scala:107:25] input [63:0] auto_out_d_bits_data // @[LazyModuleImp.scala:107:25] ); wire auto_in_a_valid_0 = auto_in_a_valid; // @[Buffer.scala:40:9] wire [2:0] auto_in_a_bits_opcode_0 = auto_in_a_bits_opcode; // @[Buffer.scala:40:9] wire [2:0] auto_in_a_bits_param_0 = auto_in_a_bits_param; // @[Buffer.scala:40:9] wire [2:0] auto_in_a_bits_size_0 = auto_in_a_bits_size; // @[Buffer.scala:40:9] wire [4:0] auto_in_a_bits_source_0 = auto_in_a_bits_source; // @[Buffer.scala:40:9] wire [27:0] auto_in_a_bits_address_0 = auto_in_a_bits_address; // @[Buffer.scala:40:9] wire [7:0] auto_in_a_bits_mask_0 = auto_in_a_bits_mask; // @[Buffer.scala:40:9] wire [63:0] auto_in_a_bits_data_0 = auto_in_a_bits_data; // @[Buffer.scala:40:9] wire auto_in_a_bits_corrupt_0 = auto_in_a_bits_corrupt; // @[Buffer.scala:40:9] wire auto_in_d_ready_0 = auto_in_d_ready; // @[Buffer.scala:40:9] wire auto_out_a_ready_0 = auto_out_a_ready; // @[Buffer.scala:40:9] wire auto_out_d_valid_0 = auto_out_d_valid; // @[Buffer.scala:40:9] wire [2:0] auto_out_d_bits_opcode_0 = auto_out_d_bits_opcode; // @[Buffer.scala:40:9] wire [2:0] auto_out_d_bits_size_0 = auto_out_d_bits_size; // 
@[Buffer.scala:40:9] wire [4:0] auto_out_d_bits_source_0 = auto_out_d_bits_source; // @[Buffer.scala:40:9] wire [63:0] auto_out_d_bits_data_0 = auto_out_d_bits_data; // @[Buffer.scala:40:9] wire auto_out_d_bits_sink = 1'h0; // @[Decoupled.scala:362:21] wire auto_out_d_bits_denied = 1'h0; // @[Decoupled.scala:362:21] wire auto_out_d_bits_corrupt = 1'h0; // @[Decoupled.scala:362:21] wire nodeOut_d_bits_sink = 1'h0; // @[Decoupled.scala:362:21] wire nodeOut_d_bits_denied = 1'h0; // @[Decoupled.scala:362:21] wire nodeOut_d_bits_corrupt = 1'h0; // @[Decoupled.scala:362:21] wire [1:0] auto_out_d_bits_param = 2'h0; // @[Decoupled.scala:362:21] wire nodeIn_a_ready; // @[MixedNode.scala:551:17] wire [1:0] nodeOut_d_bits_param = 2'h0; // @[Decoupled.scala:362:21] wire nodeIn_a_valid = auto_in_a_valid_0; // @[Buffer.scala:40:9] wire [2:0] nodeIn_a_bits_opcode = auto_in_a_bits_opcode_0; // @[Buffer.scala:40:9] wire [2:0] nodeIn_a_bits_param = auto_in_a_bits_param_0; // @[Buffer.scala:40:9] wire [2:0] nodeIn_a_bits_size = auto_in_a_bits_size_0; // @[Buffer.scala:40:9] wire [4:0] nodeIn_a_bits_source = auto_in_a_bits_source_0; // @[Buffer.scala:40:9] wire [27:0] nodeIn_a_bits_address = auto_in_a_bits_address_0; // @[Buffer.scala:40:9] wire [7:0] nodeIn_a_bits_mask = auto_in_a_bits_mask_0; // @[Buffer.scala:40:9] wire [63:0] nodeIn_a_bits_data = auto_in_a_bits_data_0; // @[Buffer.scala:40:9] wire nodeIn_a_bits_corrupt = auto_in_a_bits_corrupt_0; // @[Buffer.scala:40:9] wire nodeIn_d_ready = auto_in_d_ready_0; // @[Buffer.scala:40:9] wire nodeIn_d_valid; // @[MixedNode.scala:551:17] wire [2:0] nodeIn_d_bits_opcode; // @[MixedNode.scala:551:17] wire [1:0] nodeIn_d_bits_param; // @[MixedNode.scala:551:17] wire [2:0] nodeIn_d_bits_size; // @[MixedNode.scala:551:17] wire [4:0] nodeIn_d_bits_source; // @[MixedNode.scala:551:17] wire nodeIn_d_bits_sink; // @[MixedNode.scala:551:17] wire nodeIn_d_bits_denied; // @[MixedNode.scala:551:17] wire [63:0] nodeIn_d_bits_data; // @[MixedNode.scala:551:17] wire nodeIn_d_bits_corrupt; // @[MixedNode.scala:551:17] wire nodeOut_a_ready = auto_out_a_ready_0; // @[Buffer.scala:40:9] wire nodeOut_a_valid; // @[MixedNode.scala:542:17] wire [2:0] nodeOut_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] nodeOut_a_bits_param; // @[MixedNode.scala:542:17] wire [2:0] nodeOut_a_bits_size; // @[MixedNode.scala:542:17] wire [4:0] nodeOut_a_bits_source; // @[MixedNode.scala:542:17] wire [27:0] nodeOut_a_bits_address; // @[MixedNode.scala:542:17] wire [7:0] nodeOut_a_bits_mask; // @[MixedNode.scala:542:17] wire [63:0] nodeOut_a_bits_data; // @[MixedNode.scala:542:17] wire nodeOut_a_bits_corrupt; // @[MixedNode.scala:542:17] wire nodeOut_d_ready; // @[MixedNode.scala:542:17] wire nodeOut_d_valid = auto_out_d_valid_0; // @[Buffer.scala:40:9] wire [2:0] nodeOut_d_bits_opcode = auto_out_d_bits_opcode_0; // @[Buffer.scala:40:9] wire [2:0] nodeOut_d_bits_size = auto_out_d_bits_size_0; // @[Buffer.scala:40:9] wire [4:0] nodeOut_d_bits_source = auto_out_d_bits_source_0; // @[Buffer.scala:40:9] wire [63:0] nodeOut_d_bits_data = auto_out_d_bits_data_0; // @[Buffer.scala:40:9] wire auto_in_a_ready_0; // @[Buffer.scala:40:9] wire [2:0] auto_in_d_bits_opcode_0; // @[Buffer.scala:40:9] wire [1:0] auto_in_d_bits_param_0; // @[Buffer.scala:40:9] wire [2:0] auto_in_d_bits_size_0; // @[Buffer.scala:40:9] wire [4:0] auto_in_d_bits_source_0; // @[Buffer.scala:40:9] wire auto_in_d_bits_sink_0; // @[Buffer.scala:40:9] wire auto_in_d_bits_denied_0; // @[Buffer.scala:40:9] wire [63:0] 
auto_in_d_bits_data_0; // @[Buffer.scala:40:9] wire auto_in_d_bits_corrupt_0; // @[Buffer.scala:40:9] wire auto_in_d_valid_0; // @[Buffer.scala:40:9] wire [2:0] auto_out_a_bits_opcode_0; // @[Buffer.scala:40:9] wire [2:0] auto_out_a_bits_param_0; // @[Buffer.scala:40:9] wire [2:0] auto_out_a_bits_size_0; // @[Buffer.scala:40:9] wire [4:0] auto_out_a_bits_source_0; // @[Buffer.scala:40:9] wire [27:0] auto_out_a_bits_address_0; // @[Buffer.scala:40:9] wire [7:0] auto_out_a_bits_mask_0; // @[Buffer.scala:40:9] wire [63:0] auto_out_a_bits_data_0; // @[Buffer.scala:40:9] wire auto_out_a_bits_corrupt_0; // @[Buffer.scala:40:9] wire auto_out_a_valid_0; // @[Buffer.scala:40:9] wire auto_out_d_ready_0; // @[Buffer.scala:40:9] assign auto_in_a_ready_0 = nodeIn_a_ready; // @[Buffer.scala:40:9] assign auto_in_d_valid_0 = nodeIn_d_valid; // @[Buffer.scala:40:9] assign auto_in_d_bits_opcode_0 = nodeIn_d_bits_opcode; // @[Buffer.scala:40:9] assign auto_in_d_bits_param_0 = nodeIn_d_bits_param; // @[Buffer.scala:40:9] assign auto_in_d_bits_size_0 = nodeIn_d_bits_size; // @[Buffer.scala:40:9] assign auto_in_d_bits_source_0 = nodeIn_d_bits_source; // @[Buffer.scala:40:9] assign auto_in_d_bits_sink_0 = nodeIn_d_bits_sink; // @[Buffer.scala:40:9] assign auto_in_d_bits_denied_0 = nodeIn_d_bits_denied; // @[Buffer.scala:40:9] assign auto_in_d_bits_data_0 = nodeIn_d_bits_data; // @[Buffer.scala:40:9] assign auto_in_d_bits_corrupt_0 = nodeIn_d_bits_corrupt; // @[Buffer.scala:40:9] assign auto_out_a_valid_0 = nodeOut_a_valid; // @[Buffer.scala:40:9] assign auto_out_a_bits_opcode_0 = nodeOut_a_bits_opcode; // @[Buffer.scala:40:9] assign auto_out_a_bits_param_0 = nodeOut_a_bits_param; // @[Buffer.scala:40:9] assign auto_out_a_bits_size_0 = nodeOut_a_bits_size; // @[Buffer.scala:40:9] assign auto_out_a_bits_source_0 = nodeOut_a_bits_source; // @[Buffer.scala:40:9] assign auto_out_a_bits_address_0 = nodeOut_a_bits_address; // @[Buffer.scala:40:9] assign auto_out_a_bits_mask_0 = nodeOut_a_bits_mask; // @[Buffer.scala:40:9] assign auto_out_a_bits_data_0 = nodeOut_a_bits_data; // @[Buffer.scala:40:9] assign auto_out_a_bits_corrupt_0 = nodeOut_a_bits_corrupt; // @[Buffer.scala:40:9] assign auto_out_d_ready_0 = nodeOut_d_ready; // @[Buffer.scala:40:9] TLMonitor_61 monitor ( // @[Nodes.scala:27:25] .clock (clock), .reset (reset), .io_in_a_ready (nodeIn_a_ready), // @[MixedNode.scala:551:17] .io_in_a_valid (nodeIn_a_valid), // @[MixedNode.scala:551:17] .io_in_a_bits_opcode (nodeIn_a_bits_opcode), // @[MixedNode.scala:551:17] .io_in_a_bits_param (nodeIn_a_bits_param), // @[MixedNode.scala:551:17] .io_in_a_bits_size (nodeIn_a_bits_size), // @[MixedNode.scala:551:17] .io_in_a_bits_source (nodeIn_a_bits_source), // @[MixedNode.scala:551:17] .io_in_a_bits_address (nodeIn_a_bits_address), // @[MixedNode.scala:551:17] .io_in_a_bits_mask (nodeIn_a_bits_mask), // @[MixedNode.scala:551:17] .io_in_a_bits_data (nodeIn_a_bits_data), // @[MixedNode.scala:551:17] .io_in_a_bits_corrupt (nodeIn_a_bits_corrupt), // @[MixedNode.scala:551:17] .io_in_d_ready (nodeIn_d_ready), // @[MixedNode.scala:551:17] .io_in_d_valid (nodeIn_d_valid), // @[MixedNode.scala:551:17] .io_in_d_bits_opcode (nodeIn_d_bits_opcode), // @[MixedNode.scala:551:17] .io_in_d_bits_param (nodeIn_d_bits_param), // @[MixedNode.scala:551:17] .io_in_d_bits_size (nodeIn_d_bits_size), // @[MixedNode.scala:551:17] .io_in_d_bits_source (nodeIn_d_bits_source), // @[MixedNode.scala:551:17] .io_in_d_bits_sink (nodeIn_d_bits_sink), // @[MixedNode.scala:551:17] .io_in_d_bits_denied 
(nodeIn_d_bits_denied), // @[MixedNode.scala:551:17] .io_in_d_bits_data (nodeIn_d_bits_data), // @[MixedNode.scala:551:17] .io_in_d_bits_corrupt (nodeIn_d_bits_corrupt) // @[MixedNode.scala:551:17] ); // @[Nodes.scala:27:25] Queue2_TLBundleA_a28d64s5k1z3u_1 nodeOut_a_q ( // @[Decoupled.scala:362:21] .clock (clock), .reset (reset), .io_enq_ready (nodeIn_a_ready), .io_enq_valid (nodeIn_a_valid), // @[MixedNode.scala:551:17] .io_enq_bits_opcode (nodeIn_a_bits_opcode), // @[MixedNode.scala:551:17] .io_enq_bits_param (nodeIn_a_bits_param), // @[MixedNode.scala:551:17] .io_enq_bits_size (nodeIn_a_bits_size), // @[MixedNode.scala:551:17] .io_enq_bits_source (nodeIn_a_bits_source), // @[MixedNode.scala:551:17] .io_enq_bits_address (nodeIn_a_bits_address), // @[MixedNode.scala:551:17] .io_enq_bits_mask (nodeIn_a_bits_mask), // @[MixedNode.scala:551:17] .io_enq_bits_data (nodeIn_a_bits_data), // @[MixedNode.scala:551:17] .io_enq_bits_corrupt (nodeIn_a_bits_corrupt), // @[MixedNode.scala:551:17] .io_deq_ready (nodeOut_a_ready), // @[MixedNode.scala:542:17] .io_deq_valid (nodeOut_a_valid), .io_deq_bits_opcode (nodeOut_a_bits_opcode), .io_deq_bits_param (nodeOut_a_bits_param), .io_deq_bits_size (nodeOut_a_bits_size), .io_deq_bits_source (nodeOut_a_bits_source), .io_deq_bits_address (nodeOut_a_bits_address), .io_deq_bits_mask (nodeOut_a_bits_mask), .io_deq_bits_data (nodeOut_a_bits_data), .io_deq_bits_corrupt (nodeOut_a_bits_corrupt) ); // @[Decoupled.scala:362:21] Queue2_TLBundleD_a28d64s5k1z3u_1 nodeIn_d_q ( // @[Decoupled.scala:362:21] .clock (clock), .reset (reset), .io_enq_ready (nodeOut_d_ready), .io_enq_valid (nodeOut_d_valid), // @[MixedNode.scala:542:17] .io_enq_bits_opcode (nodeOut_d_bits_opcode), // @[MixedNode.scala:542:17] .io_enq_bits_size (nodeOut_d_bits_size), // @[MixedNode.scala:542:17] .io_enq_bits_source (nodeOut_d_bits_source), // @[MixedNode.scala:542:17] .io_enq_bits_data (nodeOut_d_bits_data), // @[MixedNode.scala:542:17] .io_deq_ready (nodeIn_d_ready), // @[MixedNode.scala:551:17] .io_deq_valid (nodeIn_d_valid), .io_deq_bits_opcode (nodeIn_d_bits_opcode), .io_deq_bits_param (nodeIn_d_bits_param), .io_deq_bits_size (nodeIn_d_bits_size), .io_deq_bits_source (nodeIn_d_bits_source), .io_deq_bits_sink (nodeIn_d_bits_sink), .io_deq_bits_denied (nodeIn_d_bits_denied), .io_deq_bits_data (nodeIn_d_bits_data), .io_deq_bits_corrupt (nodeIn_d_bits_corrupt) ); // @[Decoupled.scala:362:21] assign auto_in_a_ready = auto_in_a_ready_0; // @[Buffer.scala:40:9] assign auto_in_d_valid = auto_in_d_valid_0; // @[Buffer.scala:40:9] assign auto_in_d_bits_opcode = auto_in_d_bits_opcode_0; // @[Buffer.scala:40:9] assign auto_in_d_bits_param = auto_in_d_bits_param_0; // @[Buffer.scala:40:9] assign auto_in_d_bits_size = auto_in_d_bits_size_0; // @[Buffer.scala:40:9] assign auto_in_d_bits_source = auto_in_d_bits_source_0; // @[Buffer.scala:40:9] assign auto_in_d_bits_sink = auto_in_d_bits_sink_0; // @[Buffer.scala:40:9] assign auto_in_d_bits_denied = auto_in_d_bits_denied_0; // @[Buffer.scala:40:9] assign auto_in_d_bits_data = auto_in_d_bits_data_0; // @[Buffer.scala:40:9] assign auto_in_d_bits_corrupt = auto_in_d_bits_corrupt_0; // @[Buffer.scala:40:9] assign auto_out_a_valid = auto_out_a_valid_0; // @[Buffer.scala:40:9] assign auto_out_a_bits_opcode = auto_out_a_bits_opcode_0; // @[Buffer.scala:40:9] assign auto_out_a_bits_param = auto_out_a_bits_param_0; // @[Buffer.scala:40:9] assign auto_out_a_bits_size = auto_out_a_bits_size_0; // @[Buffer.scala:40:9] assign auto_out_a_bits_source = 
auto_out_a_bits_source_0; // @[Buffer.scala:40:9] assign auto_out_a_bits_address = auto_out_a_bits_address_0; // @[Buffer.scala:40:9] assign auto_out_a_bits_mask = auto_out_a_bits_mask_0; // @[Buffer.scala:40:9] assign auto_out_a_bits_data = auto_out_a_bits_data_0; // @[Buffer.scala:40:9] assign auto_out_a_bits_corrupt = auto_out_a_bits_corrupt_0; // @[Buffer.scala:40:9] assign auto_out_d_ready = auto_out_d_ready_0; // @[Buffer.scala:40:9] endmodule
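The generated TLBuffer above consists of two two-entry queues, one per direction (the Queue2_TLBundleA and Queue2_TLBundleD instances annotated Decoupled.scala:362:21). Below is a minimal sketch of that pattern for a generic Decoupled payload; it is an illustration of chisel3.util.Queue usage under that assumption, not the rocket-chip TLBuffer implementation itself.

import chisel3._
import chisel3.util.{Decoupled, Queue}

// Sketch: pass a Decoupled stream through a two-entry queue, the same
// structure the generated Queue2_* modules implement for the A and D channels.
class TwoEntryBuffer[T <: Data](gen: T) extends Module {
  val io = IO(new Bundle {
    val enq = Flipped(Decoupled(gen)) // corresponds to the auto_in side
    val deq = Decoupled(gen)          // corresponds to the auto_out side
  })
  io.deq <> Queue(io.enq, 2) // elaborates a Queue2_<T> child module
}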
Generate the Verilog code corresponding to the following Chisel files. File Nodes.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.nodes._ import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection} case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args)) object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle] { def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo) def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo) def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle) def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle) def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString) override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = { val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge))) monitor.io.in := bundle } override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters = pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }) override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters = pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }) } trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut] case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode case class TLAdapterNode( clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s }, managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLJunctionNode( clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters], managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])( implicit valName: ValName) extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode object TLNameNode { def apply(name: ValName) = TLIdentityNode()(name) def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLIdentityNode = apply(Some(name)) } case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)() object TLTempNode { def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp")) } case class TLNexusNode( clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters, managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)( implicit valName: ValName) extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode abstract class TLCustomNode(implicit valName: ValName) extends CustomNode(TLImp) with TLFormatNode // Asynchronous crossings trait TLAsyncFormatNode extends 
FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters] object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle] { def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle) def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString) override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLAsyncAdapterNode( clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s }, managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode object TLAsyncNameNode { def apply(name: ValName) = TLAsyncIdentityNode()(name) def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLAsyncIdentityNode = apply(Some(name)) } case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLAsyncImp)( dFn = { p => TLAsyncClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName) extends MixedAdapterNode(TLAsyncImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) }, uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut] // Rationally related crossings trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters] object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle] { def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle) def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */) override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): 
TLRationalManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLRationalAdapterNode( clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s }, managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode object TLRationalNameNode { def apply(name: ValName) = TLRationalIdentityNode()(name) def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLRationalIdentityNode = apply(Some(name)) } case class TLRationalSourceNode()(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLRationalImp)( dFn = { p => TLRationalClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName) extends MixedAdapterNode(TLRationalImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut] // Credited version of TileLink channels trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters] object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle] { def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle) def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString) override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLCreditedAdapterNode( clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s }, managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode object TLCreditedNameNode { def apply(name: ValName) = TLCreditedIdentityNode()(name) def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLCreditedIdentityNode = apply(Some(name)) } case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLCreditedImp)( 
dFn = { p => TLCreditedClientPortParameters(delay, p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLCreditedImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut] File RegisterRouter.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.nodes._ import freechips.rocketchip.diplomacy.{AddressSet, TransferSizes} import freechips.rocketchip.resources.{Device, Resource, ResourceBindings} import freechips.rocketchip.prci.{NoCrossing} import freechips.rocketchip.regmapper.{RegField, RegMapper, RegMapperParams, RegMapperInput, RegisterRouter} import freechips.rocketchip.util.{BundleField, ControlKey, ElaborationArtefacts, GenRegDescsAnno} import scala.math.min class TLRegisterRouterExtraBundle(val sourceBits: Int, val sizeBits: Int) extends Bundle { val source = UInt((sourceBits max 1).W) val size = UInt((sizeBits max 1).W) } case object TLRegisterRouterExtra extends ControlKey[TLRegisterRouterExtraBundle]("tlrr_extra") case class TLRegisterRouterExtraField(sourceBits: Int, sizeBits: Int) extends BundleField[TLRegisterRouterExtraBundle](TLRegisterRouterExtra, Output(new TLRegisterRouterExtraBundle(sourceBits, sizeBits)), x => { x.size := 0.U x.source := 0.U }) /** TLRegisterNode is a specialized TL SinkNode that encapsulates MMIO registers. * It provides functionality for describing and outputting metdata about the registers in several formats. * It also provides a concrete implementation of a regmap function that will be used * to wire a map of internal registers associated with this node to the node's interconnect port. */ case class TLRegisterNode( address: Seq[AddressSet], device: Device, deviceKey: String = "reg/control", concurrency: Int = 0, beatBytes: Int = 4, undefZero: Boolean = true, executable: Boolean = false)( implicit valName: ValName) extends SinkNode(TLImp)(Seq(TLSlavePortParameters.v1( Seq(TLSlaveParameters.v1( address = address, resources = Seq(Resource(device, deviceKey)), executable = executable, supportsGet = TransferSizes(1, beatBytes), supportsPutPartial = TransferSizes(1, beatBytes), supportsPutFull = TransferSizes(1, beatBytes), fifoId = Some(0))), // requests are handled in order beatBytes = beatBytes, minLatency = min(concurrency, 1)))) with TLFormatNode // the Queue adds at most one cycle { val size = 1 << log2Ceil(1 + address.map(_.max).max - address.map(_.base).min) require (size >= beatBytes) address.foreach { case a => require (a.widen(size-1).base == address.head.widen(size-1).base, s"TLRegisterNode addresses (${address}) must be aligned to its size ${size}") } // Calling this method causes the matching TL2 bundle to be // configured to route all requests to the listed RegFields. 
def regmap(mapping: RegField.Map*) = { val (bundleIn, edge) = this.in(0) val a = bundleIn.a val d = bundleIn.d val fields = TLRegisterRouterExtraField(edge.bundle.sourceBits, edge.bundle.sizeBits) +: a.bits.params.echoFields val params = RegMapperParams(log2Up(size/beatBytes), beatBytes, fields) val in = Wire(Decoupled(new RegMapperInput(params))) in.bits.read := a.bits.opcode === TLMessages.Get in.bits.index := edge.addr_hi(a.bits) in.bits.data := a.bits.data in.bits.mask := a.bits.mask Connectable.waiveUnmatched(in.bits.extra, a.bits.echo) match { case (lhs, rhs) => lhs :<= rhs } val a_extra = in.bits.extra(TLRegisterRouterExtra) a_extra.source := a.bits.source a_extra.size := a.bits.size // Invoke the register map builder val out = RegMapper(beatBytes, concurrency, undefZero, in, mapping:_*) // No flow control needed in.valid := a.valid a.ready := in.ready d.valid := out.valid out.ready := d.ready // We must restore the size to enable width adapters to work val d_extra = out.bits.extra(TLRegisterRouterExtra) d.bits := edge.AccessAck(toSource = d_extra.source, lgSize = d_extra.size) // avoid a Mux on the data bus by manually overriding two fields d.bits.data := out.bits.data Connectable.waiveUnmatched(d.bits.echo, out.bits.extra) match { case (lhs, rhs) => lhs :<= rhs } d.bits.opcode := Mux(out.bits.read, TLMessages.AccessAckData, TLMessages.AccessAck) // Tie off unused channels bundleIn.b.valid := false.B bundleIn.c.ready := true.B bundleIn.e.ready := true.B genRegDescsJson(mapping:_*) } def genRegDescsJson(mapping: RegField.Map*): Unit = { // Dump out the register map for documentation purposes. val base = address.head.base val baseHex = s"0x${base.toInt.toHexString}" val name = s"${device.describe(ResourceBindings()).name}.At${baseHex}" val json = GenRegDescsAnno.serialize(base, name, mapping:_*) var suffix = 0 while( ElaborationArtefacts.contains(s"${baseHex}.${suffix}.regmap.json")) { suffix = suffix + 1 } ElaborationArtefacts.add(s"${baseHex}.${suffix}.regmap.json", json) val module = Module.currentModule.get.asInstanceOf[RawModule] GenRegDescsAnno.anno( module, base, mapping:_*) } } /** Mix HasTLControlRegMap into any subclass of RegisterRouter to gain helper functions for attaching a device control register map to TileLink. * - The intended use case is that controlNode will diplomatically publish a SW-visible device's memory-mapped control registers. * - Use the clock crossing helper controlXing to externally connect controlNode to a TileLink interconnect. * - Use the mapping helper function regmap to internally fill out the space of device control registers. 
*/ trait HasTLControlRegMap { this: RegisterRouter => protected val controlNode = TLRegisterNode( address = address, device = device, deviceKey = "reg/control", concurrency = concurrency, beatBytes = beatBytes, undefZero = undefZero, executable = executable) // Externally, this helper should be used to connect the register control port to a bus val controlXing: TLInwardClockCrossingHelper = this.crossIn(controlNode) // Backwards-compatibility default node accessor with no clock crossing lazy val node: TLInwardNode = controlXing(NoCrossing) // Internally, this function should be used to populate the control port with registers protected def regmap(mapping: RegField.Map*): Unit = { controlNode.regmap(mapping:_*) } } File TileResetSetter.scala: package chipyard.clocking import chisel3._ import chisel3.util._ import chisel3.experimental.Analog import org.chipsalliance.cde.config._ import freechips.rocketchip.subsystem._ import freechips.rocketchip.diplomacy._ import freechips.rocketchip.prci._ import freechips.rocketchip.util._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.devices.tilelink._ import freechips.rocketchip.regmapper._ import freechips.rocketchip.subsystem._ // Currently only works if all tiles are already driven by independent clock groups // TODO: After https://github.com/chipsalliance/rocket-chip/pull/2842 is merged, we should // always put all tiles on independent clock groups class TileResetSetter(address: BigInt, beatBytes: Int, tileNames: Seq[String], initResetHarts: Seq[Int])(implicit p: Parameters) extends LazyModule { val device = new SimpleDevice("tile-reset-setter", Nil) val tlNode = TLRegisterNode(Seq(AddressSet(address, 4096-1)), device, "reg/control", beatBytes=beatBytes) val clockNode = ClockGroupIdentityNode() lazy val module = new LazyModuleImp(this) { val nTiles = p(TilesLocated(InSubsystem)).size require (nTiles <= 4096 / 4) val tile_async_resets = Wire(Vec(nTiles, Reset())) val r_tile_resets = (0 until nTiles).map({ i => tile_async_resets(i) := true.B.asAsyncReset // Remove this line after https://github.com/chipsalliance/rocket-chip/pull/2842 withReset (tile_async_resets(i)) { Module(new AsyncResetRegVec(w=1, init=(if (initResetHarts.contains(i)) 1 else 0))) } }) if (nTiles > 0) tlNode.regmap((0 until nTiles).map({ i => i * 4 -> Seq(RegField.rwReg(1, r_tile_resets(i).io)) }): _*) val tileMap = tileNames.zipWithIndex.map({ case (n, i) => n -> (tile_async_resets(i), r_tile_resets(i).io.q, address + i * 4) }) (clockNode.out zip clockNode.in).map { case ((o, _), (i, _)) => (o.member.elements zip i.member.elements).foreach { case ((name, oD), (_, iD)) => oD.clock := iD.clock oD.reset := iD.reset for ((n, (rIn, rOut, addr)) <- tileMap) { if (name.contains(n)) { println(s"${addr.toString(16)}: Tile $name reset control") // Async because the reset coming out of the AsyncResetRegVec is // clocked to the bus this is attached to, not the clock in this // clock bundle. 
We expect a ClockGroupResetSynchronizer downstream // to synchronize the resets // Also, this or enforces that the tiles come out of reset after the reset of the system oD.reset := (rOut.asBool || iD.reset.asBool).asAsyncReset rIn := iD.reset } } } } } } File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. 
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. */ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } }
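For reference, the childClock/childReset wires defined in LazyRawModuleImp above only become useful when an implementation actually drives them. A minimal sketch, assuming a hypothetical outer LazyModule with a diplomatic clock sink node (none of these names come from the files in this prompt):

class ClockedIslandImp(outer: ClockedIsland) extends LazyRawModuleImp(outer) {
  // Opt in so lazy children instantiated inside this module see childClock/childReset implicitly.
  override def provideImplicitClockToLazyChildren = true

  // 'clockNode' is a hypothetical ClockSinkNode on the outer LazyModule.
  val clockBundle = outer.clockNode.in.head._1
  childClock := clockBundle.clock
  childReset := clockBundle.reset
}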
module TileResetSetter( // @[TileResetSetter.scala:26:25] input clock, // @[TileResetSetter.scala:26:25] input reset, // @[TileResetSetter.scala:26:25] input auto_clock_in_member_allClocks_uncore_clock, // @[LazyModuleImp.scala:107:25] input auto_clock_in_member_allClocks_uncore_reset, // @[LazyModuleImp.scala:107:25] output auto_clock_out_member_allClocks_uncore_clock, // @[LazyModuleImp.scala:107:25] output auto_clock_out_member_allClocks_uncore_reset, // @[LazyModuleImp.scala:107:25] output auto_tl_in_a_ready, // @[LazyModuleImp.scala:107:25] input auto_tl_in_a_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_tl_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_tl_in_a_bits_param, // @[LazyModuleImp.scala:107:25] input [1:0] auto_tl_in_a_bits_size, // @[LazyModuleImp.scala:107:25] input [10:0] auto_tl_in_a_bits_source, // @[LazyModuleImp.scala:107:25] input [20:0] auto_tl_in_a_bits_address, // @[LazyModuleImp.scala:107:25] input [7:0] auto_tl_in_a_bits_mask, // @[LazyModuleImp.scala:107:25] input auto_tl_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_tl_in_d_ready, // @[LazyModuleImp.scala:107:25] output auto_tl_in_d_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_tl_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25] output [1:0] auto_tl_in_d_bits_size, // @[LazyModuleImp.scala:107:25] output [10:0] auto_tl_in_d_bits_source // @[LazyModuleImp.scala:107:25] ); wire [2:0] tlNodeIn_d_bits_opcode = {2'h0, auto_tl_in_a_bits_opcode == 3'h4}; // @[RegisterRouter.scala:74:36, :105:19] TLMonitor_59 monitor ( // @[Nodes.scala:27:25] .clock (clock), .reset (reset), .io_in_a_ready (auto_tl_in_d_ready), .io_in_a_valid (auto_tl_in_a_valid), .io_in_a_bits_opcode (auto_tl_in_a_bits_opcode), .io_in_a_bits_param (auto_tl_in_a_bits_param), .io_in_a_bits_size (auto_tl_in_a_bits_size), .io_in_a_bits_source (auto_tl_in_a_bits_source), .io_in_a_bits_address (auto_tl_in_a_bits_address), .io_in_a_bits_mask (auto_tl_in_a_bits_mask), .io_in_a_bits_corrupt (auto_tl_in_a_bits_corrupt), .io_in_d_ready (auto_tl_in_d_ready), .io_in_d_valid (auto_tl_in_a_valid), .io_in_d_bits_opcode (tlNodeIn_d_bits_opcode), // @[RegisterRouter.scala:105:19] .io_in_d_bits_size (auto_tl_in_a_bits_size), .io_in_d_bits_source (auto_tl_in_a_bits_source) ); // @[Nodes.scala:27:25] assign auto_clock_out_member_allClocks_uncore_clock = auto_clock_in_member_allClocks_uncore_clock; // @[TileResetSetter.scala:26:25] assign auto_clock_out_member_allClocks_uncore_reset = auto_clock_in_member_allClocks_uncore_reset; // @[TileResetSetter.scala:26:25] assign auto_tl_in_a_ready = auto_tl_in_d_ready; // @[TileResetSetter.scala:26:25] assign auto_tl_in_d_valid = auto_tl_in_a_valid; // @[TileResetSetter.scala:26:25] assign auto_tl_in_d_bits_opcode = tlNodeIn_d_bits_opcode; // @[RegisterRouter.scala:105:19] assign auto_tl_in_d_bits_size = auto_tl_in_a_bits_size; // @[TileResetSetter.scala:26:25] assign auto_tl_in_d_bits_source = auto_tl_in_a_bits_source; // @[TileResetSetter.scala:26:25] endmodule
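The register map built by TileResetSetter above is one 1-bit read-write register per tile at offset 4*i from the device base; harts listed in initResetHarts power up with that bit set, and software clears it to release the tile from reset. A minimal sketch of the same RegField.rwReg pattern for two tiles (purely illustrative, with both bits initialized to 1):

// Sketch: two tile-reset bits at offsets 0x0 and 0x4; writing 0 releases the corresponding tile.
val resetRegs = Seq.tabulate(2) { i => Module(new AsyncResetRegVec(w = 1, init = 1)) }
tlNode.regmap(
  0x0 -> Seq(RegField.rwReg(1, resetRegs(0).io)),
  0x4 -> Seq(RegField.rwReg(1, resetRegs(1).io))
)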
Generate the Verilog code corresponding to the following Chisel files. File UnsafeAXI4ToTL.scala: package ara import chisel3._ import chisel3.util._ import freechips.rocketchip.amba._ import freechips.rocketchip.amba.axi4._ import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.util._ class ReorderData(val dataWidth: Int, val respWidth: Int, val userFields: Seq[BundleFieldBase]) extends Bundle { val data = UInt(dataWidth.W) val resp = UInt(respWidth.W) val last = Bool() val user = BundleMap(userFields) } /** Parameters for [[BaseReservableListBuffer]] and all child classes. * * @param numEntries Total number of elements that can be stored in the 'data' RAM * @param numLists Maximum number of linked lists * @param numBeats Maximum number of beats per entry */ case class ReservableListBufferParameters(numEntries: Int, numLists: Int, numBeats: Int) { // Avoid zero-width wires when we call 'log2Ceil' val entryBits = if (numEntries == 1) 1 else log2Ceil(numEntries) val listBits = if (numLists == 1) 1 else log2Ceil(numLists) val beatBits = if (numBeats == 1) 1 else log2Ceil(numBeats) } case class UnsafeAXI4ToTLNode(numTlTxns: Int, wcorrupt: Boolean)(implicit valName: ValName) extends MixedAdapterNode(AXI4Imp, TLImp)( dFn = { case mp => TLMasterPortParameters.v2( masters = mp.masters.zipWithIndex.map { case (m, i) => // Support 'numTlTxns' read requests and 'numTlTxns' write requests at once. val numSourceIds = numTlTxns * 2 TLMasterParameters.v2( name = m.name, sourceId = IdRange(i * numSourceIds, (i + 1) * numSourceIds), nodePath = m.nodePath ) }, echoFields = mp.echoFields, requestFields = AMBAProtField() +: mp.requestFields, responseKeys = mp.responseKeys ) }, uFn = { mp => AXI4SlavePortParameters( slaves = mp.managers.map { m => val maxXfer = TransferSizes(1, mp.beatBytes * (1 << AXI4Parameters.lenBits)) AXI4SlaveParameters( address = m.address, resources = m.resources, regionType = m.regionType, executable = m.executable, nodePath = m.nodePath, supportsWrite = m.supportsPutPartial.intersect(maxXfer), supportsRead = m.supportsGet.intersect(maxXfer), interleavedId = Some(0) // TL2 never interleaves D beats ) }, beatBytes = mp.beatBytes, minLatency = mp.minLatency, responseFields = mp.responseFields, requestKeys = (if (wcorrupt) Seq(AMBACorrupt) else Seq()) ++ mp.requestKeys.filter(_ != AMBAProt) ) } ) class UnsafeAXI4ToTL(numTlTxns: Int, wcorrupt: Boolean)(implicit p: Parameters) extends LazyModule { require(numTlTxns >= 1) require(isPow2(numTlTxns), s"Number of TileLink transactions ($numTlTxns) must be a power of 2") val node = UnsafeAXI4ToTLNode(numTlTxns, wcorrupt) lazy val module = new LazyModuleImp(this) { (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => edgeIn.master.masters.foreach { m => require(m.aligned, "AXI4ToTL requires aligned requests") } val numIds = edgeIn.master.endId val beatBytes = edgeOut.slave.beatBytes val maxTransfer = edgeOut.slave.maxTransfer val maxBeats = maxTransfer / beatBytes // Look for an Error device to redirect bad requests val errorDevs = edgeOut.slave.managers.filter(_.nodePath.last.lazyModule.className == "TLError") require(!errorDevs.isEmpty, "There is no TLError reachable from AXI4ToTL. 
One must be instantiated.") val errorDev = errorDevs.maxBy(_.maxTransfer) val errorDevAddr = errorDev.address.head.base require( errorDev.supportsPutPartial.contains(maxTransfer), s"Error device supports ${errorDev.supportsPutPartial} PutPartial but must support $maxTransfer" ) require( errorDev.supportsGet.contains(maxTransfer), s"Error device supports ${errorDev.supportsGet} Get but must support $maxTransfer" ) // All of the read-response reordering logic. val listBufData = new ReorderData(beatBytes * 8, edgeIn.bundle.respBits, out.d.bits.user.fields) val listBufParams = ReservableListBufferParameters(numTlTxns, numIds, maxBeats) val listBuffer = if (numTlTxns > 1) { Module(new ReservableListBuffer(listBufData, listBufParams)) } else { Module(new PassthroughListBuffer(listBufData, listBufParams)) } // To differentiate between read and write transaction IDs, we will set the MSB of the TileLink 'source' field to // 0 for read requests and 1 for write requests. val isReadSourceBit = 0.U(1.W) val isWriteSourceBit = 1.U(1.W) /* Read request logic */ val rOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle))) val rBytes1 = in.ar.bits.bytes1() val rSize = OH1ToUInt(rBytes1) val rOk = edgeOut.slave.supportsGetSafe(in.ar.bits.addr, rSize) val rId = if (numTlTxns > 1) { Cat(isReadSourceBit, listBuffer.ioReservedIndex) } else { isReadSourceBit } val rAddr = Mux(rOk, in.ar.bits.addr, errorDevAddr.U | in.ar.bits.addr(log2Ceil(beatBytes) - 1, 0)) // Indicates if there are still valid TileLink source IDs left to use. val canIssueR = listBuffer.ioReserve.ready listBuffer.ioReserve.bits := in.ar.bits.id listBuffer.ioReserve.valid := in.ar.valid && rOut.ready in.ar.ready := rOut.ready && canIssueR rOut.valid := in.ar.valid && canIssueR rOut.bits :<= edgeOut.Get(rId, rAddr, rSize)._2 rOut.bits.user :<= in.ar.bits.user rOut.bits.user.lift(AMBAProt).foreach { rProt => rProt.privileged := in.ar.bits.prot(0) rProt.secure := !in.ar.bits.prot(1) rProt.fetch := in.ar.bits.prot(2) rProt.bufferable := in.ar.bits.cache(0) rProt.modifiable := in.ar.bits.cache(1) rProt.readalloc := in.ar.bits.cache(2) rProt.writealloc := in.ar.bits.cache(3) } /* Write request logic */ // Strip off the MSB, which identifies the transaction as read vs write. val strippedResponseSourceId = if (numTlTxns > 1) { out.d.bits.source((out.d.bits.source).getWidth - 2, 0) } else { // When there's only 1 TileLink transaction allowed for read/write, then this field is always 0. 0.U(1.W) } // Track when a write request burst is in progress. val writeBurstBusy = RegInit(false.B) when(in.w.fire) { writeBurstBusy := !in.w.bits.last } val usedWriteIds = RegInit(0.U(numTlTxns.W)) val canIssueW = !usedWriteIds.andR val usedWriteIdsSet = WireDefault(0.U(numTlTxns.W)) val usedWriteIdsClr = WireDefault(0.U(numTlTxns.W)) usedWriteIds := (usedWriteIds & ~usedWriteIdsClr) | usedWriteIdsSet // Since write responses can show up in the middle of a write burst, we need to ensure the write burst ID doesn't // change mid-burst. 
val freeWriteIdOHRaw = Wire(UInt(numTlTxns.W)) val freeWriteIdOH = freeWriteIdOHRaw holdUnless !writeBurstBusy val freeWriteIdIndex = OHToUInt(freeWriteIdOH) freeWriteIdOHRaw := ~(leftOR(~usedWriteIds) << 1) & ~usedWriteIds val wOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle))) val wBytes1 = in.aw.bits.bytes1() val wSize = OH1ToUInt(wBytes1) val wOk = edgeOut.slave.supportsPutPartialSafe(in.aw.bits.addr, wSize) val wId = if (numTlTxns > 1) { Cat(isWriteSourceBit, freeWriteIdIndex) } else { isWriteSourceBit } val wAddr = Mux(wOk, in.aw.bits.addr, errorDevAddr.U | in.aw.bits.addr(log2Ceil(beatBytes) - 1, 0)) // Here, we're taking advantage of the Irrevocable behavior of AXI4 (once 'valid' is asserted it must remain // asserted until the handshake occurs). We will only accept W-channel beats when we have a valid AW beat, but // the AW-channel beat won't fire until the final W-channel beat fires. So, we have stable address/size/strb // bits during a W-channel burst. in.aw.ready := wOut.ready && in.w.valid && in.w.bits.last && canIssueW in.w.ready := wOut.ready && in.aw.valid && canIssueW wOut.valid := in.aw.valid && in.w.valid && canIssueW wOut.bits :<= edgeOut.Put(wId, wAddr, wSize, in.w.bits.data, in.w.bits.strb)._2 in.w.bits.user.lift(AMBACorrupt).foreach { wOut.bits.corrupt := _ } wOut.bits.user :<= in.aw.bits.user wOut.bits.user.lift(AMBAProt).foreach { wProt => wProt.privileged := in.aw.bits.prot(0) wProt.secure := !in.aw.bits.prot(1) wProt.fetch := in.aw.bits.prot(2) wProt.bufferable := in.aw.bits.cache(0) wProt.modifiable := in.aw.bits.cache(1) wProt.readalloc := in.aw.bits.cache(2) wProt.writealloc := in.aw.bits.cache(3) } // Merge the AXI4 read/write requests into the TL-A channel. TLArbiter(TLArbiter.roundRobin)(out.a, (0.U, rOut), (in.aw.bits.len, wOut)) /* Read/write response logic */ val okB = Wire(Irrevocable(new AXI4BundleB(edgeIn.bundle))) val okR = Wire(Irrevocable(new AXI4BundleR(edgeIn.bundle))) val dResp = Mux(out.d.bits.denied || out.d.bits.corrupt, AXI4Parameters.RESP_SLVERR, AXI4Parameters.RESP_OKAY) val dHasData = edgeOut.hasData(out.d.bits) val (_dFirst, dLast, _dDone, dCount) = edgeOut.count(out.d) val dNumBeats1 = edgeOut.numBeats1(out.d.bits) // Handle cases where writeack arrives before write is done val writeEarlyAck = (UIntToOH(strippedResponseSourceId) & usedWriteIds) === 0.U out.d.ready := Mux(dHasData, listBuffer.ioResponse.ready, okB.ready && !writeEarlyAck) listBuffer.ioDataOut.ready := okR.ready okR.valid := listBuffer.ioDataOut.valid okB.valid := out.d.valid && !dHasData && !writeEarlyAck listBuffer.ioResponse.valid := out.d.valid && dHasData listBuffer.ioResponse.bits.index := strippedResponseSourceId listBuffer.ioResponse.bits.data.data := out.d.bits.data listBuffer.ioResponse.bits.data.resp := dResp listBuffer.ioResponse.bits.data.last := dLast listBuffer.ioResponse.bits.data.user :<= out.d.bits.user listBuffer.ioResponse.bits.count := dCount listBuffer.ioResponse.bits.numBeats1 := dNumBeats1 okR.bits.id := listBuffer.ioDataOut.bits.listIndex okR.bits.data := listBuffer.ioDataOut.bits.payload.data okR.bits.resp := listBuffer.ioDataOut.bits.payload.resp okR.bits.last := listBuffer.ioDataOut.bits.payload.last okR.bits.user :<= listBuffer.ioDataOut.bits.payload.user // Upon the final beat in a write request, record a mapping from TileLink source ID to AXI write ID. Upon a write // response, mark the write transaction as complete. 
val writeIdMap = Mem(numTlTxns, UInt(log2Ceil(numIds).W)) val writeResponseId = writeIdMap.read(strippedResponseSourceId) when(wOut.fire) { writeIdMap.write(freeWriteIdIndex, in.aw.bits.id) } when(edgeOut.done(wOut)) { usedWriteIdsSet := freeWriteIdOH } when(okB.fire) { usedWriteIdsClr := UIntToOH(strippedResponseSourceId, numTlTxns) } okB.bits.id := writeResponseId okB.bits.resp := dResp okB.bits.user :<= out.d.bits.user // AXI4 needs irrevocable behaviour in.r <> Queue.irrevocable(okR, 1, flow = true) in.b <> Queue.irrevocable(okB, 1, flow = true) // Unused channels out.b.ready := true.B out.c.valid := false.B out.e.valid := false.B /* Alignment constraints. The AXI4Fragmenter should guarantee all of these constraints. */ def checkRequest[T <: AXI4BundleA](a: IrrevocableIO[T], reqType: String): Unit = { val lReqType = reqType.toLowerCase when(a.valid) { assert(a.bits.len < maxBeats.U, s"$reqType burst length (%d) must be less than $maxBeats", a.bits.len + 1.U) // Narrow transfers and FIXED bursts must be single-beat bursts. when(a.bits.len =/= 0.U) { assert( a.bits.size === log2Ceil(beatBytes).U, s"Narrow $lReqType transfers (%d < $beatBytes bytes) can't be multi-beat bursts (%d beats)", 1.U << a.bits.size, a.bits.len + 1.U ) assert( a.bits.burst =/= AXI4Parameters.BURST_FIXED, s"Fixed $lReqType bursts can't be multi-beat bursts (%d beats)", a.bits.len + 1.U ) } // Furthermore, the transfer size (a.bits.bytes1() + 1.U) must be naturally-aligned to the address (in // particular, during both WRAP and INCR bursts), but this constraint is already checked by TileLink // Monitors. Note that this alignment requirement means that WRAP bursts are identical to INCR bursts. } } checkRequest(in.ar, "Read") checkRequest(in.aw, "Write") } } } object UnsafeAXI4ToTL { def apply(numTlTxns: Int = 1, wcorrupt: Boolean = true)(implicit p: Parameters) = { val axi42tl = LazyModule(new UnsafeAXI4ToTL(numTlTxns, wcorrupt)) axi42tl.node } } /* ReservableListBuffer logic, and associated classes. */ class ResponsePayload[T <: Data](val data: T, val params: ReservableListBufferParameters) extends Bundle { val index = UInt(params.entryBits.W) val count = UInt(params.beatBits.W) val numBeats1 = UInt(params.beatBits.W) } class DataOutPayload[T <: Data](val payload: T, val params: ReservableListBufferParameters) extends Bundle { val listIndex = UInt(params.listBits.W) } /** Abstract base class to unify [[ReservableListBuffer]] and [[PassthroughListBuffer]]. */ abstract class BaseReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends Module { require(params.numEntries > 0) require(params.numLists > 0) val ioReserve = IO(Flipped(Decoupled(UInt(params.listBits.W)))) val ioReservedIndex = IO(Output(UInt(params.entryBits.W))) val ioResponse = IO(Flipped(Decoupled(new ResponsePayload(gen, params)))) val ioDataOut = IO(Decoupled(new DataOutPayload(gen, params))) } /** A modified version of 'ListBuffer' from 'sifive/block-inclusivecache-sifive'. This module forces users to reserve * linked list entries (through the 'ioReserve' port) before writing data into those linked lists (through the * 'ioResponse' port). Each response is tagged to indicate which linked list it is written into. The responses for a * given linked list can come back out-of-order, but they will be read out through the 'ioDataOut' port in-order. 
* * ==Constructor== * @param gen Chisel type of linked list data element * @param params Other parameters * * ==Module IO== * @param ioReserve Index of list to reserve a new element in * @param ioReservedIndex Index of the entry that was reserved in the linked list, valid when 'ioReserve.fire' * @param ioResponse Payload containing response data and linked-list-entry index * @param ioDataOut Payload containing data read from response linked list and linked list index */ class ReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends BaseReservableListBuffer(gen, params) { val valid = RegInit(0.U(params.numLists.W)) val head = Mem(params.numLists, UInt(params.entryBits.W)) val tail = Mem(params.numLists, UInt(params.entryBits.W)) val used = RegInit(0.U(params.numEntries.W)) val next = Mem(params.numEntries, UInt(params.entryBits.W)) val map = Mem(params.numEntries, UInt(params.listBits.W)) val dataMems = Seq.fill(params.numBeats) { SyncReadMem(params.numEntries, gen) } val dataIsPresent = RegInit(0.U(params.numEntries.W)) val beats = Mem(params.numEntries, UInt(params.beatBits.W)) // The 'data' SRAM should be single-ported (read-or-write), since dual-ported SRAMs are significantly slower. val dataMemReadEnable = WireDefault(false.B) val dataMemWriteEnable = WireDefault(false.B) assert(!(dataMemReadEnable && dataMemWriteEnable)) // 'freeOH' has a single bit set, which is the least-significant bit that is cleared in 'used'. So, it's the // lowest-index entry in the 'data' RAM which is free. val freeOH = Wire(UInt(params.numEntries.W)) val freeIndex = OHToUInt(freeOH) freeOH := ~(leftOR(~used) << 1) & ~used ioReservedIndex := freeIndex val validSet = WireDefault(0.U(params.numLists.W)) val validClr = WireDefault(0.U(params.numLists.W)) val usedSet = WireDefault(0.U(params.numEntries.W)) val usedClr = WireDefault(0.U(params.numEntries.W)) val dataIsPresentSet = WireDefault(0.U(params.numEntries.W)) val dataIsPresentClr = WireDefault(0.U(params.numEntries.W)) valid := (valid & ~validClr) | validSet used := (used & ~usedClr) | usedSet dataIsPresent := (dataIsPresent & ~dataIsPresentClr) | dataIsPresentSet /* Reservation logic signals */ val reserveTail = Wire(UInt(params.entryBits.W)) val reserveIsValid = Wire(Bool()) /* Response logic signals */ val responseIndex = Wire(UInt(params.entryBits.W)) val responseListIndex = Wire(UInt(params.listBits.W)) val responseHead = Wire(UInt(params.entryBits.W)) val responseTail = Wire(UInt(params.entryBits.W)) val nextResponseHead = Wire(UInt(params.entryBits.W)) val nextDataIsPresent = Wire(Bool()) val isResponseInOrder = Wire(Bool()) val isEndOfList = Wire(Bool()) val isLastBeat = Wire(Bool()) val isLastResponseBeat = Wire(Bool()) val isLastUnwindBeat = Wire(Bool()) /* Reservation logic */ reserveTail := tail.read(ioReserve.bits) reserveIsValid := valid(ioReserve.bits) ioReserve.ready := !used.andR // When we want to append-to and destroy the same linked list on the same cycle, we need to take special care that we // actually start a new list, rather than appending to a list that's about to disappear. 
val reserveResponseSameList = ioReserve.bits === responseListIndex val appendToAndDestroyList = ioReserve.fire && ioDataOut.fire && reserveResponseSameList && isEndOfList && isLastBeat when(ioReserve.fire) { validSet := UIntToOH(ioReserve.bits, params.numLists) usedSet := freeOH when(reserveIsValid && !appendToAndDestroyList) { next.write(reserveTail, freeIndex) }.otherwise { head.write(ioReserve.bits, freeIndex) } tail.write(ioReserve.bits, freeIndex) map.write(freeIndex, ioReserve.bits) } /* Response logic */ // The majority of the response logic (reading from and writing to the various RAMs) is common between the // response-from-IO case (ioResponse.fire) and the response-from-unwind case (unwindDataIsValid). // The read from the 'next' RAM should be performed at the address given by 'responseHead'. However, we only use the // 'nextResponseHead' signal when 'isResponseInOrder' is asserted (both in the response-from-IO and // response-from-unwind cases), which implies that 'responseHead' equals 'responseIndex'. 'responseHead' comes after // two back-to-back RAM reads, so indexing into the 'next' RAM with 'responseIndex' is much quicker. responseHead := head.read(responseListIndex) responseTail := tail.read(responseListIndex) nextResponseHead := next.read(responseIndex) nextDataIsPresent := dataIsPresent(nextResponseHead) // Note that when 'isEndOfList' is asserted, 'nextResponseHead' (and therefore 'nextDataIsPresent') is invalid, since // there isn't a next element in the linked list. isResponseInOrder := responseHead === responseIndex isEndOfList := responseHead === responseTail isLastResponseBeat := ioResponse.bits.count === ioResponse.bits.numBeats1 // When a response's last beat is sent to the output channel, mark it as completed. This can happen in two // situations: // 1. We receive an in-order response, which travels straight from 'ioResponse' to 'ioDataOut'. The 'data' SRAM // reservation was never needed. // 2. An entry is read out of the 'data' SRAM (within the unwind FSM). when(ioDataOut.fire && isLastBeat) { // Mark the reservation as no-longer-used. usedClr := UIntToOH(responseIndex, params.numEntries) // If the response is in-order, then we're popping an element from this linked list. when(isEndOfList) { // Once we pop the last element from a linked list, mark it as no-longer-present. validClr := UIntToOH(responseListIndex, params.numLists) }.otherwise { // Move the linked list's head pointer to the new head pointer. head.write(responseListIndex, nextResponseHead) } } // If we get an out-of-order response, then stash it in the 'data' SRAM for later unwinding. when(ioResponse.fire && !isResponseInOrder) { dataMemWriteEnable := true.B when(isLastResponseBeat) { dataIsPresentSet := UIntToOH(ioResponse.bits.index, params.numEntries) beats.write(ioResponse.bits.index, ioResponse.bits.numBeats1) } } // Use the 'ioResponse.bits.count' index (AKA the beat number) to select which 'data' SRAM to write to. 
val responseCountOH = UIntToOH(ioResponse.bits.count, params.numBeats) (responseCountOH.asBools zip dataMems) foreach { case (select, seqMem) => when(select && dataMemWriteEnable) { seqMem.write(ioResponse.bits.index, ioResponse.bits.data) } } /* Response unwind logic */ // Unwind FSM state definitions val sIdle :: sUnwinding :: Nil = Enum(2) val unwindState = RegInit(sIdle) val busyUnwinding = unwindState === sUnwinding val startUnwind = Wire(Bool()) val stopUnwind = Wire(Bool()) when(startUnwind) { unwindState := sUnwinding }.elsewhen(stopUnwind) { unwindState := sIdle } assert(!(startUnwind && stopUnwind)) // Start the unwind FSM when there is an old out-of-order response stored in the 'data' SRAM that is now about to // become the next in-order response. As noted previously, when 'isEndOfList' is asserted, 'nextDataIsPresent' is // invalid. // // Note that since an in-order response from 'ioResponse' to 'ioDataOut' starts the unwind FSM, we don't have to // worry about overwriting the 'data' SRAM's output when we start the unwind FSM. startUnwind := ioResponse.fire && isResponseInOrder && isLastResponseBeat && !isEndOfList && nextDataIsPresent // Stop the unwind FSM when the output channel consumes the final beat of an element from the unwind FSM, and one of // two things happens: // 1. We're still waiting for the next in-order response for this list (!nextDataIsPresent) // 2. There are no more outstanding responses in this list (isEndOfList) // // Including 'busyUnwinding' ensures this is a single-cycle pulse, and it never fires while in-order transactions are // passing from 'ioResponse' to 'ioDataOut'. stopUnwind := busyUnwinding && ioDataOut.fire && isLastUnwindBeat && (!nextDataIsPresent || isEndOfList) val isUnwindBurstOver = Wire(Bool()) val startNewBurst = startUnwind || (isUnwindBurstOver && dataMemReadEnable) // Track the number of beats left to unwind for each list entry. At the start of a new burst, we flop the number of // beats in this burst (minus 1) into 'unwindBeats1', and we reset the 'beatCounter' counter. With each beat, we // increment 'beatCounter' until it reaches 'unwindBeats1'. val unwindBeats1 = Reg(UInt(params.beatBits.W)) val nextBeatCounter = Wire(UInt(params.beatBits.W)) val beatCounter = RegNext(nextBeatCounter) isUnwindBurstOver := beatCounter === unwindBeats1 when(startNewBurst) { unwindBeats1 := beats.read(nextResponseHead) nextBeatCounter := 0.U }.elsewhen(dataMemReadEnable) { nextBeatCounter := beatCounter + 1.U }.otherwise { nextBeatCounter := beatCounter } // When unwinding, feed the next linked-list head pointer (read out of the 'next' RAM) back so we can unwind the next // entry in this linked list. Only update the pointer when we're actually moving to the next 'data' SRAM entry (which // happens at the start of reading a new stored burst). val unwindResponseIndex = RegEnable(nextResponseHead, startNewBurst) responseIndex := Mux(busyUnwinding, unwindResponseIndex, ioResponse.bits.index) // Hold 'nextResponseHead' static while we're in the middle of unwinding a multi-beat burst entry. We don't want the // SRAM read address to shift while reading beats from a burst. Note that this is identical to 'nextResponseHead // holdUnless startNewBurst', but 'unwindResponseIndex' already implements the 'RegEnable' signal in 'holdUnless'. val unwindReadAddress = Mux(startNewBurst, nextResponseHead, unwindResponseIndex) // The 'data' SRAM's output is valid if we read from the SRAM on the previous cycle. 
The SRAM's output stays valid // until it is consumed by the output channel (and if we don't read from the SRAM again on that same cycle). val unwindDataIsValid = RegInit(false.B) when(dataMemReadEnable) { unwindDataIsValid := true.B }.elsewhen(ioDataOut.fire) { unwindDataIsValid := false.B } isLastUnwindBeat := isUnwindBurstOver && unwindDataIsValid // Indicates if this is the last beat for both 'ioResponse'-to-'ioDataOut' and unwind-to-'ioDataOut' beats. isLastBeat := Mux(busyUnwinding, isLastUnwindBeat, isLastResponseBeat) // Select which SRAM to read from based on the beat counter. val dataOutputVec = Wire(Vec(params.numBeats, gen)) val nextBeatCounterOH = UIntToOH(nextBeatCounter, params.numBeats) (nextBeatCounterOH.asBools zip dataMems).zipWithIndex foreach { case ((select, seqMem), i) => dataOutputVec(i) := seqMem.read(unwindReadAddress, select && dataMemReadEnable) } // Select the current 'data' SRAM output beat, and save the output in a register in case we're being back-pressured // by 'ioDataOut'. This implements the functionality of 'readAndHold', but only on the single SRAM we're reading // from. val dataOutput = dataOutputVec(beatCounter) holdUnless RegNext(dataMemReadEnable) // Mark 'data' burst entries as no-longer-present as they get read out of the SRAM. when(dataMemReadEnable) { dataIsPresentClr := UIntToOH(unwindReadAddress, params.numEntries) } // As noted above, when starting the unwind FSM, we know the 'data' SRAM's output isn't valid, so it's safe to issue // a read command. Otherwise, only issue an SRAM read when the next 'unwindState' is 'sUnwinding', and if we know // we're not going to overwrite the SRAM's current output (the SRAM output is already valid, and it's not going to be // consumed by the output channel). val dontReadFromDataMem = unwindDataIsValid && !ioDataOut.ready dataMemReadEnable := startUnwind || (busyUnwinding && !stopUnwind && !dontReadFromDataMem) // While unwinding, prevent new reservations from overwriting the current 'map' entry that we're using. We need // 'responseListIndex' to be coherent for the entire unwind process. val rawResponseListIndex = map.read(responseIndex) val unwindResponseListIndex = RegEnable(rawResponseListIndex, startNewBurst) responseListIndex := Mux(busyUnwinding, unwindResponseListIndex, rawResponseListIndex) // Accept responses either when they can be passed through to the output channel, or if they're out-of-order and are // just going to be stashed in the 'data' SRAM. Never accept a response payload when we're busy unwinding, since that // could result in reading from and writing to the 'data' SRAM in the same cycle, and we want that SRAM to be // single-ported. ioResponse.ready := (ioDataOut.ready || !isResponseInOrder) && !busyUnwinding // Either pass an in-order response to the output channel, or data read from the unwind FSM. ioDataOut.valid := Mux(busyUnwinding, unwindDataIsValid, ioResponse.valid && isResponseInOrder) ioDataOut.bits.listIndex := responseListIndex ioDataOut.bits.payload := Mux(busyUnwinding, dataOutput, ioResponse.bits.data) // It's an error to get a response that isn't associated with a valid linked list. when(ioResponse.fire || unwindDataIsValid) { assert( valid(responseListIndex), "No linked list exists at index %d, mapped from %d", responseListIndex, responseIndex ) } when(busyUnwinding && dataMemReadEnable) { assert(isResponseInOrder, "Unwind FSM must read entries from SRAM in order") } } /** Specialized version of [[ReservableListBuffer]] for the case of numEntries == 1. 
* * Much of the complex logic in [[ReservableListBuffer]] can disappear in this case. For instance, we don't have to * reorder any responses, or store any linked lists. */ class PassthroughListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends BaseReservableListBuffer(gen, params) { require(params.numEntries == 1, s"PassthroughListBuffer is only valid when 'numEntries' (${params.numEntries}) is 1") val used = RegInit(0.U(params.numEntries.W)) val map = Mem(params.numEntries, UInt(params.listBits.W)) val usedSet = WireDefault(0.U(params.numEntries.W)) val usedClr = WireDefault(0.U(params.numEntries.W)) used := (used & ~usedClr) | usedSet ioReserve.ready := used === 0.U // Store which list index was reserved, we need to return this value when we get a response. when(ioReserve.fire) { usedSet := 1.U map.write(0.U, ioReserve.bits) } // There's only one valid linked list entry, which is at index 0. ioReservedIndex := 0.U val isLastResponseBeat = ioResponse.bits.count === ioResponse.bits.numBeats1 // Mark the linked list as empty when we get the last beat in a response. // Note that 'ioResponse.fire === ioDataOut.fire'. when(ioResponse.fire && isLastResponseBeat) { usedClr := 1.U } // Always pass the response data straight through, since we never need to reorder the response data. ioDataOut.bits.listIndex := map.read(0.U) ioDataOut.bits.payload := ioResponse.bits.data ioDataOut.valid := ioResponse.valid ioResponse.ready := ioDataOut.ready }
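As a usage sketch of the list-buffer interface described above (request/response signal names here are hypothetical, and the widths are arbitrary): reserve an entry per outgoing request, tag each returning response with the reserved index, and consume in-order data from ioDataOut.

// Sketch: a 4-entry, 2-list, single-beat reorder buffer over an 8-bit payload
val params = ReservableListBufferParameters(numEntries = 4, numLists = 2, numBeats = 1)
val buf = Module(new ReservableListBuffer(UInt(8.W), params))

buf.ioReserve.valid := reqValid            // hypothetical request handshake
buf.ioReserve.bits  := reqListId
val reqTag = buf.ioReservedIndex           // attach this tag to the outgoing request

buf.ioResponse.valid          := respValid // responses may return out of order
buf.ioResponse.bits.index     := respTag
buf.ioResponse.bits.data      := respData
buf.ioResponse.bits.count     := 0.U       // single-beat responses
buf.ioResponse.bits.numBeats1 := 0.U

buf.ioDataOut.ready := sinkReady           // data comes back out in list order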
module dataMems_494( // @[UnsafeAXI4ToTL.scala:365:62] input [4:0] R0_addr, input R0_en, input R0_clk, output [66:0] R0_data, input [4:0] W0_addr, input W0_en, input W0_clk, input [66:0] W0_data ); dataMems_0_ext dataMems_0_ext ( // @[UnsafeAXI4ToTL.scala:365:62] .R0_addr (R0_addr), .R0_en (R0_en), .R0_clk (R0_clk), .R0_data (R0_data), .W0_addr (W0_addr), .W0_en (W0_en), .W0_clk (W0_clk), .W0_data (W0_data) ); // @[UnsafeAXI4ToTL.scala:365:62] endmodule
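Each dataMems_* module above is the memory FIRRTL emits for one of the SyncReadMem instances created at UnsafeAXI4ToTL.scala line 365 (one per beat). A minimal sketch of Chisel that infers the same one-read, one-write synchronous memory shape, sized to match the 32-entry, 67-bit instance here (raddr/ren/waddr/wen/wdata are hypothetical signals):

// Sketch: single read port and single write port, as in ReservableListBuffer's data SRAMs
val mem = SyncReadMem(32, UInt(67.W))
val rdata = mem.read(raddr, ren)           // lowers to the R0_* port group
when (wen) { mem.write(waddr, wdata) }     // lowers to the W0_* port group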
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
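As a usage sketch of the companion-object helpers defined above (signal names are hypothetical): synchronize an asynchronous single-bit input with a 3-deep chain, and capture a value crossing clock domains with ClockCrossingReg under an enable.

// 3-deep asynchronous-reset synchronizer chain, reset value 0
val irqSync = AsyncResetSynchronizerShiftReg(io.irq_async, sync = 3, init = 0, name = Some("irq_sync"))

// Single-deep clock-domain-crossing capture register, updated only while 'capture_en' is high
val captured = ClockCrossingReg(io.bus_data, en = io.capture_en, doInit = false, name = Some("cdc_capture"))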
module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_127( // @[SynchronizerReg.scala:68:19]
  input  clock, // @[SynchronizerReg.scala:68:19]
  input  reset, // @[SynchronizerReg.scala:68:19]
  output io_q // @[ShiftReg.scala:36:14]
);
  wire io_d = 1'h1; // @[SynchronizerReg.scala:54:22, :68:19]
  wire _sync_2_T = 1'h1; // @[SynchronizerReg.scala:54:22, :68:19]
  wire io_q_0; // @[SynchronizerReg.scala:68:19]
  reg  sync_0; // @[SynchronizerReg.scala:51:87]
  assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19]
  reg  sync_1; // @[SynchronizerReg.scala:51:87]
  reg  sync_2; // @[SynchronizerReg.scala:51:87]
  always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19]
    if (reset) begin // @[SynchronizerReg.scala:68:19]
      sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87]
      sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87]
      sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87]
    end else begin // @[SynchronizerReg.scala:68:19]
      sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87]
      sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87]
      sync_2 <= 1'h1; // @[SynchronizerReg.scala:51:87, :54:22, :68:19]
    end
  end // always @(posedge, posedge)
  assign io_q = io_q_0; // @[SynchronizerReg.scala:68:19]
endmodule
Generate the Verilog code corresponding to the following Chisel files. File DivSqrtRecFN_small.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2017 SiFive, Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of SiFive nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY SIFIVE AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL SIFIVE OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ /* s = sigWidth c_i = newBit Division: width of a is (s+2) Normal ------ (qi + ci * 2^(-i))*b <= a q0 = 0 r0 = a q(i+1) = qi + ci*2^(-i) ri = a - qi*b r(i+1) = a - q(i+1)*b = a - qi*b - ci*2^(-i)*b r(i+1) = ri - ci*2^(-i)*b ci = ri >= 2^(-i)*b summary_i = ri != 0 i = 0 to s+1 (s+1)th bit plus summary_(i+1) gives enough information for rounding If (a < b), then we need to calculate (s+2)th bit and summary_(i+1) because we need s bits ignoring the leading zero. (This is skipCycle2 part of Hauser's code.) Hauser ------ sig_i = qi rem_i = 2^(i-2)*ri cycle_i = s+3-i sig_0 = 0 rem_0 = a/4 cycle_0 = s+3 bit_0 = 2^0 (= 2^(s+1), since we represent a, b and q with (s+2) bits) sig(i+1) = sig(i) + ci*bit_i rem(i+1) = 2rem_i - ci*b/2 ci = 2rem_i >= b/2 bit_i = 2^-i (=2^(cycle_i-2), since we represent a, b and q with (s+2) bits) cycle(i+1) = cycle_i-1 summary_1 = a <> b summary(i+1) = if ci then 2rem_i-b/2 <> 0 else summary_i, i <> 0 Proof: 2^i*r(i+1) = 2^i*ri - ci*b. Qed ci = 2^i*ri >= b. Qed summary(i+1) = if ci then rem(i+1) else summary_i, i <> 0 Now, note that all of ck's cannot be 0, since that means a is 0. So when you traverse through a chain of 0 ck's, from the end, eventually, you reach a non-zero cj. That is exactly the value of ri as the reminder remains the same. When all ck's are 0 except c0 (which must be 1) then summary_1 is set correctly according to r1 = a-b != 0. 
So summary(i+1) is always set correctly according to r(i+1) Square root: width of a is (s+1) Normal ------ (xi + ci*2^(-i))^2 <= a xi^2 + ci*2^(-i)*(2xi+ci*2^(-i)) <= a x0 = 0 x(i+1) = xi + ci*2^(-i) ri = a - xi^2 r(i+1) = a - x(i+1)^2 = a - (xi^2 + ci*2^(-i)*(2xi+ci*2^(-i))) = ri - ci*2^(-i)*(2xi+ci*2^(-i)) = ri - ci*2^(-i)*(2xi+2^(-i)) // ci is always 0 or 1 ci = ri >= 2^(-i)*(2xi + 2^(-i)) summary_i = ri != 0 i = 0 to s+1 For odd expression, do 2 steps initially. (s+1)th bit plus summary_(i+1) gives enough information for rounding. Hauser ------ sig_i = xi rem_i = ri*2^(i-1) cycle_i = s+2-i bit_i = 2^(-i) (= 2^(s-i) = 2^(cycle_i-2) in terms of bit representation) sig_0 = 0 rem_0 = a/2 cycle_0 = s+2 bit_0 = 1 (= 2^s in terms of bit representation) sig(i+1) = sig_i + ci * bit_i rem(i+1) = 2rem_i - ci*(2sig_i + bit_i) ci = 2*sig_i + bit_i <= 2*rem_i bit_i = 2^(cycle_i-2) (in terms of bit representation) cycle(i+1) = cycle_i-1 summary_1 = a - (2^s) (in terms of bit representation) summary(i+1) = if ci then rem(i+1) <> 0 else summary_i, i <> 0 Proof: ci = 2*sig_i + bit_i <= 2*rem_i ci = 2xi + 2^(-i) <= ri*2^i. Qed sig(i+1) = sig_i + ci * bit_i x(i+1) = xi + ci*2^(-i). Qed rem(i+1) = 2rem_i - ci*(2sig_i + bit_i) r(i+1)*2^i = ri*2^i - ci*(2xi + 2^(-i)) r(i+1) = ri - ci*2^(-i)*(2xi + 2^(-i)). Qed Same argument as before for summary. ------------------------------ Note that all registers are updated normally until cycle == 2. At cycle == 2, rem is not updated, but all other registers are updated normally. But, cycle == 1 does not read rem to calculate anything (note that final summary is calculated using the values at cycle = 2). */ package hardfloat import chisel3._ import chisel3.util._ import consts._ /*---------------------------------------------------------------------------- | Computes a division or square root for floating-point in recoded form. | Multiple clock cycles are needed for each division or square-root operation, | except possibly in special cases. 
*----------------------------------------------------------------------------*/ class DivSqrtRawFN_small(expWidth: Int, sigWidth: Int, options: Int) extends Module { override def desiredName = s"DivSqrtRawFN_small_e${expWidth}_s${sigWidth}" val io = IO(new Bundle { /*-------------------------------------------------------------------- *--------------------------------------------------------------------*/ val inReady = Output(Bool()) val inValid = Input(Bool()) val sqrtOp = Input(Bool()) val a = Input(new RawFloat(expWidth, sigWidth)) val b = Input(new RawFloat(expWidth, sigWidth)) val roundingMode = Input(UInt(3.W)) /*-------------------------------------------------------------------- *--------------------------------------------------------------------*/ val rawOutValid_div = Output(Bool()) val rawOutValid_sqrt = Output(Bool()) val roundingModeOut = Output(UInt(3.W)) val invalidExc = Output(Bool()) val infiniteExc = Output(Bool()) val rawOut = Output(new RawFloat(expWidth, sigWidth + 2)) }) /*------------------------------------------------------------------------ *------------------------------------------------------------------------*/ val cycleNum = RegInit(0.U(log2Ceil(sigWidth + 3).W)) val inReady = RegInit(true.B) // <-> (cycleNum <= 1) val rawOutValid = RegInit(false.B) // <-> (cycleNum === 1) val sqrtOp_Z = Reg(Bool()) val majorExc_Z = Reg(Bool()) //*** REDUCE 3 BITS TO 2-BIT CODE: val isNaN_Z = Reg(Bool()) val isInf_Z = Reg(Bool()) val isZero_Z = Reg(Bool()) val sign_Z = Reg(Bool()) val sExp_Z = Reg(SInt((expWidth + 2).W)) val fractB_Z = Reg(UInt(sigWidth.W)) val roundingMode_Z = Reg(UInt(3.W)) /*------------------------------------------------------------------------ | (The most-significant and least-significant bits of 'rem_Z' are needed | only for square roots.) *------------------------------------------------------------------------*/ val rem_Z = Reg(UInt((sigWidth + 2).W)) val notZeroRem_Z = Reg(Bool()) val sigX_Z = Reg(UInt((sigWidth + 2).W)) /*------------------------------------------------------------------------ *------------------------------------------------------------------------*/ val rawA_S = io.a val rawB_S = io.b //*** IMPROVE THESE: val notSigNaNIn_invalidExc_S_div = (rawA_S.isZero && rawB_S.isZero) || (rawA_S.isInf && rawB_S.isInf) val notSigNaNIn_invalidExc_S_sqrt = ! rawA_S.isNaN && ! rawA_S.isZero && rawA_S.sign val majorExc_S = Mux(io.sqrtOp, isSigNaNRawFloat(rawA_S) || notSigNaNIn_invalidExc_S_sqrt, isSigNaNRawFloat(rawA_S) || isSigNaNRawFloat(rawB_S) || notSigNaNIn_invalidExc_S_div || (! rawA_S.isNaN && ! rawA_S.isInf && rawB_S.isZero) ) val isNaN_S = Mux(io.sqrtOp, rawA_S.isNaN || notSigNaNIn_invalidExc_S_sqrt, rawA_S.isNaN || rawB_S.isNaN || notSigNaNIn_invalidExc_S_div ) val isInf_S = Mux(io.sqrtOp, rawA_S.isInf, rawA_S.isInf || rawB_S.isZero) val isZero_S = Mux(io.sqrtOp, rawA_S.isZero, rawA_S.isZero || rawB_S.isInf) val sign_S = rawA_S.sign ^ (! io.sqrtOp && rawB_S.sign) val specialCaseA_S = rawA_S.isNaN || rawA_S.isInf || rawA_S.isZero val specialCaseB_S = rawB_S.isNaN || rawB_S.isInf || rawB_S.isZero val normalCase_S_div = ! specialCaseA_S && ! specialCaseB_S val normalCase_S_sqrt = ! specialCaseA_S && ! 
rawA_S.sign val normalCase_S = Mux(io.sqrtOp, normalCase_S_sqrt, normalCase_S_div) val sExpQuot_S_div = rawA_S.sExp +& Cat(rawB_S.sExp(expWidth), ~rawB_S.sExp(expWidth - 1, 0)).asSInt //*** IS THIS OPTIMAL?: val sSatExpQuot_S_div = Cat(Mux(((BigInt(7)<<(expWidth - 2)).S <= sExpQuot_S_div), 6.U, sExpQuot_S_div(expWidth + 1, expWidth - 2) ), sExpQuot_S_div(expWidth - 3, 0) ).asSInt val evenSqrt_S = io.sqrtOp && ! rawA_S.sExp(0) val oddSqrt_S = io.sqrtOp && rawA_S.sExp(0) /*------------------------------------------------------------------------ *------------------------------------------------------------------------*/ val idle = cycleNum === 0.U val entering = inReady && io.inValid val entering_normalCase = entering && normalCase_S val processTwoBits = cycleNum >= 3.U && ((options & divSqrtOpt_twoBitsPerCycle) != 0).B val skipCycle2 = cycleNum === 3.U && sigX_Z(sigWidth + 1) && ((options & divSqrtOpt_twoBitsPerCycle) == 0).B when (! idle || entering) { def computeCycleNum(f: UInt => UInt): UInt = { Mux(entering & ! normalCase_S, f(1.U), 0.U) | Mux(entering_normalCase, Mux(io.sqrtOp, Mux(rawA_S.sExp(0), f(sigWidth.U), f((sigWidth + 1).U)), f((sigWidth + 2).U) ), 0.U ) | Mux(! entering && ! skipCycle2, f(cycleNum - Mux(processTwoBits, 2.U, 1.U)), 0.U) | Mux(skipCycle2, f(1.U), 0.U) } inReady := computeCycleNum(_ <= 1.U).asBool rawOutValid := computeCycleNum(_ === 1.U).asBool cycleNum := computeCycleNum(x => x) } io.inReady := inReady /*------------------------------------------------------------------------ *------------------------------------------------------------------------*/ when (entering) { sqrtOp_Z := io.sqrtOp majorExc_Z := majorExc_S isNaN_Z := isNaN_S isInf_Z := isInf_S isZero_Z := isZero_S sign_Z := sign_S sExp_Z := Mux(io.sqrtOp, (rawA_S.sExp>>1) +& (BigInt(1)<<(expWidth - 1)).S, sSatExpQuot_S_div ) roundingMode_Z := io.roundingMode } when (entering || ! inReady && sqrtOp_Z) { fractB_Z := Mux(inReady && ! io.sqrtOp, rawB_S.sig(sigWidth - 2, 0)<<1, 0.U) | Mux(inReady && io.sqrtOp && rawA_S.sExp(0), (BigInt(1)<<(sigWidth - 2)).U, 0.U) | Mux(inReady && io.sqrtOp && ! rawA_S.sExp(0), (BigInt(1)<<(sigWidth - 1)).U, 0.U) | Mux(! inReady /* sqrtOp_Z */ && processTwoBits, fractB_Z>>2, 0.U) | Mux(! inReady /* sqrtOp_Z */ && ! processTwoBits, fractB_Z>>1, 0.U) } /*------------------------------------------------------------------------ *------------------------------------------------------------------------*/ val rem = Mux(inReady && ! oddSqrt_S, rawA_S.sig<<1, 0.U) | Mux(inReady && oddSqrt_S, Cat(rawA_S.sig(sigWidth - 1, sigWidth - 2) - 1.U, rawA_S.sig(sigWidth - 3, 0)<<3 ), 0.U ) | Mux(! inReady, rem_Z<<1, 0.U) val bitMask = (1.U<<cycleNum)>>2 val trialTerm = Mux(inReady && ! io.sqrtOp, rawB_S.sig<<1, 0.U) | Mux(inReady && evenSqrt_S, (BigInt(1)<<sigWidth).U, 0.U) | Mux(inReady && oddSqrt_S, (BigInt(5)<<(sigWidth - 1)).U, 0.U) | Mux(! inReady, fractB_Z, 0.U) | Mux(! inReady && ! sqrtOp_Z, 1.U << sigWidth, 0.U) | Mux(! 
inReady && sqrtOp_Z, sigX_Z<<1, 0.U) val trialRem = rem.zext -& trialTerm.zext val newBit = (0.S <= trialRem) val nextRem_Z = Mux(newBit, trialRem.asUInt, rem)(sigWidth + 1, 0) val rem2 = nextRem_Z<<1 val trialTerm2_newBit0 = Mux(sqrtOp_Z, fractB_Z>>1 | sigX_Z<<1, fractB_Z | (1.U << sigWidth)) val trialTerm2_newBit1 = trialTerm2_newBit0 | Mux(sqrtOp_Z, fractB_Z<<1, 0.U) val trialRem2 = Mux(newBit, (trialRem<<1) - trialTerm2_newBit1.zext, (rem_Z<<2)(sigWidth+2, 0).zext - trialTerm2_newBit0.zext) val newBit2 = (0.S <= trialRem2) val nextNotZeroRem_Z = Mux(inReady || newBit, trialRem =/= 0.S, notZeroRem_Z) val nextNotZeroRem_Z_2 = // <-> Mux(newBit2, trialRem2 =/= 0.S, nextNotZeroRem_Z) processTwoBits && newBit && (0.S < (trialRem<<1) - trialTerm2_newBit1.zext) || processTwoBits && !newBit && (0.S < (rem_Z<<2)(sigWidth+2, 0).zext - trialTerm2_newBit0.zext) || !(processTwoBits && newBit2) && nextNotZeroRem_Z val nextRem_Z_2 = Mux(processTwoBits && newBit2, trialRem2.asUInt(sigWidth + 1, 0), 0.U) | Mux(processTwoBits && !newBit2, rem2(sigWidth + 1, 0), 0.U) | Mux(!processTwoBits, nextRem_Z, 0.U) when (entering || ! inReady) { notZeroRem_Z := nextNotZeroRem_Z_2 rem_Z := nextRem_Z_2 sigX_Z := Mux(inReady && ! io.sqrtOp, newBit<<(sigWidth + 1), 0.U) | Mux(inReady && io.sqrtOp, (BigInt(1)<<sigWidth).U, 0.U) | Mux(inReady && oddSqrt_S, newBit<<(sigWidth - 1), 0.U) | Mux(! inReady, sigX_Z, 0.U) | Mux(! inReady && newBit, bitMask, 0.U) | Mux(processTwoBits && newBit2, bitMask>>1, 0.U) } /*------------------------------------------------------------------------ *------------------------------------------------------------------------*/ io.rawOutValid_div := rawOutValid && ! sqrtOp_Z io.rawOutValid_sqrt := rawOutValid && sqrtOp_Z io.roundingModeOut := roundingMode_Z io.invalidExc := majorExc_Z && isNaN_Z io.infiniteExc := majorExc_Z && ! 
isNaN_Z io.rawOut.isNaN := isNaN_Z io.rawOut.isInf := isInf_Z io.rawOut.isZero := isZero_Z io.rawOut.sign := sign_Z io.rawOut.sExp := sExp_Z io.rawOut.sig := sigX_Z<<1 | notZeroRem_Z } /*---------------------------------------------------------------------------- *----------------------------------------------------------------------------*/ class DivSqrtRecFNToRaw_small(expWidth: Int, sigWidth: Int, options: Int) extends Module { override def desiredName = s"DivSqrtRecFMToRaw_small_e${expWidth}_s${sigWidth}" val io = IO(new Bundle { /*-------------------------------------------------------------------- *--------------------------------------------------------------------*/ val inReady = Output(Bool()) val inValid = Input(Bool()) val sqrtOp = Input(Bool()) val a = Input(UInt((expWidth + sigWidth + 1).W)) val b = Input(UInt((expWidth + sigWidth + 1).W)) val roundingMode = Input(UInt(3.W)) /*-------------------------------------------------------------------- *--------------------------------------------------------------------*/ val rawOutValid_div = Output(Bool()) val rawOutValid_sqrt = Output(Bool()) val roundingModeOut = Output(UInt(3.W)) val invalidExc = Output(Bool()) val infiniteExc = Output(Bool()) val rawOut = Output(new RawFloat(expWidth, sigWidth + 2)) }) val divSqrtRawFN = Module(new DivSqrtRawFN_small(expWidth, sigWidth, options)) io.inReady := divSqrtRawFN.io.inReady divSqrtRawFN.io.inValid := io.inValid divSqrtRawFN.io.sqrtOp := io.sqrtOp divSqrtRawFN.io.a := rawFloatFromRecFN(expWidth, sigWidth, io.a) divSqrtRawFN.io.b := rawFloatFromRecFN(expWidth, sigWidth, io.b) divSqrtRawFN.io.roundingMode := io.roundingMode io.rawOutValid_div := divSqrtRawFN.io.rawOutValid_div io.rawOutValid_sqrt := divSqrtRawFN.io.rawOutValid_sqrt io.roundingModeOut := divSqrtRawFN.io.roundingModeOut io.invalidExc := divSqrtRawFN.io.invalidExc io.infiniteExc := divSqrtRawFN.io.infiniteExc io.rawOut := divSqrtRawFN.io.rawOut } /*---------------------------------------------------------------------------- *----------------------------------------------------------------------------*/ class DivSqrtRecFN_small(expWidth: Int, sigWidth: Int, options: Int) extends Module { override def desiredName = s"DivSqrtRecFM_small_e${expWidth}_s${sigWidth}" val io = IO(new Bundle { /*-------------------------------------------------------------------- *--------------------------------------------------------------------*/ val inReady = Output(Bool()) val inValid = Input(Bool()) val sqrtOp = Input(Bool()) val a = Input(UInt((expWidth + sigWidth + 1).W)) val b = Input(UInt((expWidth + sigWidth + 1).W)) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) /*-------------------------------------------------------------------- *--------------------------------------------------------------------*/ val outValid_div = Output(Bool()) val outValid_sqrt = Output(Bool()) val out = Output(UInt((expWidth + sigWidth + 1).W)) val exceptionFlags = Output(UInt(5.W)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val divSqrtRecFNToRaw = Module(new DivSqrtRecFNToRaw_small(expWidth, sigWidth, options)) io.inReady := divSqrtRecFNToRaw.io.inReady divSqrtRecFNToRaw.io.inValid := io.inValid divSqrtRecFNToRaw.io.sqrtOp := io.sqrtOp divSqrtRecFNToRaw.io.a := io.a divSqrtRecFNToRaw.io.b := io.b divSqrtRecFNToRaw.io.roundingMode := io.roundingMode //------------------------------------------------------------------------ 
//------------------------------------------------------------------------ io.outValid_div := divSqrtRecFNToRaw.io.rawOutValid_div io.outValid_sqrt := divSqrtRecFNToRaw.io.rawOutValid_sqrt val roundRawFNToRecFN = Module(new RoundRawFNToRecFN(expWidth, sigWidth, 0)) roundRawFNToRecFN.io.invalidExc := divSqrtRecFNToRaw.io.invalidExc roundRawFNToRecFN.io.infiniteExc := divSqrtRecFNToRaw.io.infiniteExc roundRawFNToRecFN.io.in := divSqrtRecFNToRaw.io.rawOut roundRawFNToRecFN.io.roundingMode := divSqrtRecFNToRaw.io.roundingModeOut roundRawFNToRecFN.io.detectTininess := io.detectTininess io.out := roundRawFNToRecFN.io.out io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags } File rawFloatFromRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ package hardfloat import chisel3._ import chisel3.util._ /*---------------------------------------------------------------------------- | In the result, no more than one of 'isNaN', 'isInf', and 'isZero' will be | set. *----------------------------------------------------------------------------*/ object rawFloatFromRecFN { def apply(expWidth: Int, sigWidth: Int, in: Bits): RawFloat = { val exp = in(expWidth + sigWidth - 1, sigWidth - 1) val isZero = exp(expWidth, expWidth - 2) === 0.U val isSpecial = exp(expWidth, expWidth - 1) === 3.U val out = Wire(new RawFloat(expWidth, sigWidth)) out.isNaN := isSpecial && exp(expWidth - 2) out.isInf := isSpecial && ! exp(expWidth - 2) out.isZero := isZero out.sign := in(expWidth + sigWidth) out.sExp := exp.zext out.sig := 0.U(1.W) ## ! isZero ## in(sigWidth - 2, 0) out } }
module DivSqrtRecFMToRaw_small_e8_s24_5( // @[DivSqrtRecFN_small.scala:422:5] input clock, // @[DivSqrtRecFN_small.scala:422:5] input reset, // @[DivSqrtRecFN_small.scala:422:5] output io_inReady, // @[DivSqrtRecFN_small.scala:426:16] input io_inValid, // @[DivSqrtRecFN_small.scala:426:16] input io_sqrtOp, // @[DivSqrtRecFN_small.scala:426:16] input [32:0] io_a, // @[DivSqrtRecFN_small.scala:426:16] input [32:0] io_b, // @[DivSqrtRecFN_small.scala:426:16] input [2:0] io_roundingMode, // @[DivSqrtRecFN_small.scala:426:16] output io_rawOutValid_div, // @[DivSqrtRecFN_small.scala:426:16] output io_rawOutValid_sqrt, // @[DivSqrtRecFN_small.scala:426:16] output [2:0] io_roundingModeOut, // @[DivSqrtRecFN_small.scala:426:16] output io_invalidExc, // @[DivSqrtRecFN_small.scala:426:16] output io_infiniteExc, // @[DivSqrtRecFN_small.scala:426:16] output io_rawOut_isNaN, // @[DivSqrtRecFN_small.scala:426:16] output io_rawOut_isInf, // @[DivSqrtRecFN_small.scala:426:16] output io_rawOut_isZero, // @[DivSqrtRecFN_small.scala:426:16] output io_rawOut_sign, // @[DivSqrtRecFN_small.scala:426:16] output [9:0] io_rawOut_sExp, // @[DivSqrtRecFN_small.scala:426:16] output [26:0] io_rawOut_sig // @[DivSqrtRecFN_small.scala:426:16] ); wire io_inValid_0 = io_inValid; // @[DivSqrtRecFN_small.scala:422:5] wire io_sqrtOp_0 = io_sqrtOp; // @[DivSqrtRecFN_small.scala:422:5] wire [32:0] io_a_0 = io_a; // @[DivSqrtRecFN_small.scala:422:5] wire [32:0] io_b_0 = io_b; // @[DivSqrtRecFN_small.scala:422:5] wire [2:0] io_roundingMode_0 = io_roundingMode; // @[DivSqrtRecFN_small.scala:422:5] wire io_rawOut_isNaN_0; // @[DivSqrtRecFN_small.scala:422:5] wire io_rawOut_isInf_0; // @[DivSqrtRecFN_small.scala:422:5] wire io_rawOut_isZero_0; // @[DivSqrtRecFN_small.scala:422:5] wire io_rawOut_sign_0; // @[DivSqrtRecFN_small.scala:422:5] wire [9:0] io_rawOut_sExp_0; // @[DivSqrtRecFN_small.scala:422:5] wire [26:0] io_rawOut_sig_0; // @[DivSqrtRecFN_small.scala:422:5] wire io_inReady_0; // @[DivSqrtRecFN_small.scala:422:5] wire io_rawOutValid_div_0; // @[DivSqrtRecFN_small.scala:422:5] wire io_rawOutValid_sqrt_0; // @[DivSqrtRecFN_small.scala:422:5] wire [2:0] io_roundingModeOut_0; // @[DivSqrtRecFN_small.scala:422:5] wire io_invalidExc_0; // @[DivSqrtRecFN_small.scala:422:5] wire io_infiniteExc_0; // @[DivSqrtRecFN_small.scala:422:5] wire [8:0] divSqrtRawFN_io_a_exp = io_a_0[31:23]; // @[rawFloatFromRecFN.scala:51:21] wire [2:0] _divSqrtRawFN_io_a_isZero_T = divSqrtRawFN_io_a_exp[8:6]; // @[rawFloatFromRecFN.scala:51:21, :52:28] wire divSqrtRawFN_io_a_isZero = _divSqrtRawFN_io_a_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}] wire divSqrtRawFN_io_a_out_isZero = divSqrtRawFN_io_a_isZero; // @[rawFloatFromRecFN.scala:52:53, :55:23] wire [1:0] _divSqrtRawFN_io_a_isSpecial_T = divSqrtRawFN_io_a_exp[8:7]; // @[rawFloatFromRecFN.scala:51:21, :53:28] wire divSqrtRawFN_io_a_isSpecial = &_divSqrtRawFN_io_a_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}] wire _divSqrtRawFN_io_a_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33] wire _divSqrtRawFN_io_a_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33] wire _divSqrtRawFN_io_a_out_sign_T; // @[rawFloatFromRecFN.scala:59:25] wire [9:0] _divSqrtRawFN_io_a_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27] wire [24:0] _divSqrtRawFN_io_a_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44] wire divSqrtRawFN_io_a_out_isNaN; // @[rawFloatFromRecFN.scala:55:23] wire divSqrtRawFN_io_a_out_isInf; // @[rawFloatFromRecFN.scala:55:23] wire divSqrtRawFN_io_a_out_sign; // 
@[rawFloatFromRecFN.scala:55:23] wire [9:0] divSqrtRawFN_io_a_out_sExp; // @[rawFloatFromRecFN.scala:55:23] wire [24:0] divSqrtRawFN_io_a_out_sig; // @[rawFloatFromRecFN.scala:55:23] wire _divSqrtRawFN_io_a_out_isNaN_T = divSqrtRawFN_io_a_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41] wire _divSqrtRawFN_io_a_out_isInf_T = divSqrtRawFN_io_a_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41] assign _divSqrtRawFN_io_a_out_isNaN_T_1 = divSqrtRawFN_io_a_isSpecial & _divSqrtRawFN_io_a_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}] assign divSqrtRawFN_io_a_out_isNaN = _divSqrtRawFN_io_a_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33] wire _divSqrtRawFN_io_a_out_isInf_T_1 = ~_divSqrtRawFN_io_a_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}] assign _divSqrtRawFN_io_a_out_isInf_T_2 = divSqrtRawFN_io_a_isSpecial & _divSqrtRawFN_io_a_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}] assign divSqrtRawFN_io_a_out_isInf = _divSqrtRawFN_io_a_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33] assign _divSqrtRawFN_io_a_out_sign_T = io_a_0[32]; // @[rawFloatFromRecFN.scala:59:25] assign divSqrtRawFN_io_a_out_sign = _divSqrtRawFN_io_a_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25] assign _divSqrtRawFN_io_a_out_sExp_T = {1'h0, divSqrtRawFN_io_a_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27] assign divSqrtRawFN_io_a_out_sExp = _divSqrtRawFN_io_a_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27] wire _divSqrtRawFN_io_a_out_sig_T = ~divSqrtRawFN_io_a_isZero; // @[rawFloatFromRecFN.scala:52:53, :61:35] wire [1:0] _divSqrtRawFN_io_a_out_sig_T_1 = {1'h0, _divSqrtRawFN_io_a_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}] wire [22:0] _divSqrtRawFN_io_a_out_sig_T_2 = io_a_0[22:0]; // @[rawFloatFromRecFN.scala:61:49] assign _divSqrtRawFN_io_a_out_sig_T_3 = {_divSqrtRawFN_io_a_out_sig_T_1, _divSqrtRawFN_io_a_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}] assign divSqrtRawFN_io_a_out_sig = _divSqrtRawFN_io_a_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44] wire [8:0] divSqrtRawFN_io_b_exp = io_b_0[31:23]; // @[rawFloatFromRecFN.scala:51:21] wire [2:0] _divSqrtRawFN_io_b_isZero_T = divSqrtRawFN_io_b_exp[8:6]; // @[rawFloatFromRecFN.scala:51:21, :52:28] wire divSqrtRawFN_io_b_isZero = _divSqrtRawFN_io_b_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}] wire divSqrtRawFN_io_b_out_isZero = divSqrtRawFN_io_b_isZero; // @[rawFloatFromRecFN.scala:52:53, :55:23] wire [1:0] _divSqrtRawFN_io_b_isSpecial_T = divSqrtRawFN_io_b_exp[8:7]; // @[rawFloatFromRecFN.scala:51:21, :53:28] wire divSqrtRawFN_io_b_isSpecial = &_divSqrtRawFN_io_b_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}] wire _divSqrtRawFN_io_b_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33] wire _divSqrtRawFN_io_b_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33] wire _divSqrtRawFN_io_b_out_sign_T; // @[rawFloatFromRecFN.scala:59:25] wire [9:0] _divSqrtRawFN_io_b_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27] wire [24:0] _divSqrtRawFN_io_b_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44] wire divSqrtRawFN_io_b_out_isNaN; // @[rawFloatFromRecFN.scala:55:23] wire divSqrtRawFN_io_b_out_isInf; // @[rawFloatFromRecFN.scala:55:23] wire divSqrtRawFN_io_b_out_sign; // @[rawFloatFromRecFN.scala:55:23] wire [9:0] divSqrtRawFN_io_b_out_sExp; // @[rawFloatFromRecFN.scala:55:23] wire [24:0] divSqrtRawFN_io_b_out_sig; // @[rawFloatFromRecFN.scala:55:23] wire _divSqrtRawFN_io_b_out_isNaN_T = divSqrtRawFN_io_b_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41] 
wire _divSqrtRawFN_io_b_out_isInf_T = divSqrtRawFN_io_b_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41] assign _divSqrtRawFN_io_b_out_isNaN_T_1 = divSqrtRawFN_io_b_isSpecial & _divSqrtRawFN_io_b_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}] assign divSqrtRawFN_io_b_out_isNaN = _divSqrtRawFN_io_b_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33] wire _divSqrtRawFN_io_b_out_isInf_T_1 = ~_divSqrtRawFN_io_b_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}] assign _divSqrtRawFN_io_b_out_isInf_T_2 = divSqrtRawFN_io_b_isSpecial & _divSqrtRawFN_io_b_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}] assign divSqrtRawFN_io_b_out_isInf = _divSqrtRawFN_io_b_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33] assign _divSqrtRawFN_io_b_out_sign_T = io_b_0[32]; // @[rawFloatFromRecFN.scala:59:25] assign divSqrtRawFN_io_b_out_sign = _divSqrtRawFN_io_b_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25] assign _divSqrtRawFN_io_b_out_sExp_T = {1'h0, divSqrtRawFN_io_b_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27] assign divSqrtRawFN_io_b_out_sExp = _divSqrtRawFN_io_b_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27] wire _divSqrtRawFN_io_b_out_sig_T = ~divSqrtRawFN_io_b_isZero; // @[rawFloatFromRecFN.scala:52:53, :61:35] wire [1:0] _divSqrtRawFN_io_b_out_sig_T_1 = {1'h0, _divSqrtRawFN_io_b_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}] wire [22:0] _divSqrtRawFN_io_b_out_sig_T_2 = io_b_0[22:0]; // @[rawFloatFromRecFN.scala:61:49] assign _divSqrtRawFN_io_b_out_sig_T_3 = {_divSqrtRawFN_io_b_out_sig_T_1, _divSqrtRawFN_io_b_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}] assign divSqrtRawFN_io_b_out_sig = _divSqrtRawFN_io_b_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44] DivSqrtRawFN_small_e8_s24_5 divSqrtRawFN ( // @[DivSqrtRecFN_small.scala:446:15] .clock (clock), .reset (reset), .io_inReady (io_inReady_0), .io_inValid (io_inValid_0), // @[DivSqrtRecFN_small.scala:422:5] .io_sqrtOp (io_sqrtOp_0), // @[DivSqrtRecFN_small.scala:422:5] .io_a_isNaN (divSqrtRawFN_io_a_out_isNaN), // @[rawFloatFromRecFN.scala:55:23] .io_a_isInf (divSqrtRawFN_io_a_out_isInf), // @[rawFloatFromRecFN.scala:55:23] .io_a_isZero (divSqrtRawFN_io_a_out_isZero), // @[rawFloatFromRecFN.scala:55:23] .io_a_sign (divSqrtRawFN_io_a_out_sign), // @[rawFloatFromRecFN.scala:55:23] .io_a_sExp (divSqrtRawFN_io_a_out_sExp), // @[rawFloatFromRecFN.scala:55:23] .io_a_sig (divSqrtRawFN_io_a_out_sig), // @[rawFloatFromRecFN.scala:55:23] .io_b_isNaN (divSqrtRawFN_io_b_out_isNaN), // @[rawFloatFromRecFN.scala:55:23] .io_b_isInf (divSqrtRawFN_io_b_out_isInf), // @[rawFloatFromRecFN.scala:55:23] .io_b_isZero (divSqrtRawFN_io_b_out_isZero), // @[rawFloatFromRecFN.scala:55:23] .io_b_sign (divSqrtRawFN_io_b_out_sign), // @[rawFloatFromRecFN.scala:55:23] .io_b_sExp (divSqrtRawFN_io_b_out_sExp), // @[rawFloatFromRecFN.scala:55:23] .io_b_sig (divSqrtRawFN_io_b_out_sig), // @[rawFloatFromRecFN.scala:55:23] .io_roundingMode (io_roundingMode_0), // @[DivSqrtRecFN_small.scala:422:5] .io_rawOutValid_div (io_rawOutValid_div_0), .io_rawOutValid_sqrt (io_rawOutValid_sqrt_0), .io_roundingModeOut (io_roundingModeOut_0), .io_invalidExc (io_invalidExc_0), .io_infiniteExc (io_infiniteExc_0), .io_rawOut_isNaN (io_rawOut_isNaN_0), .io_rawOut_isInf (io_rawOut_isInf_0), .io_rawOut_isZero (io_rawOut_isZero_0), .io_rawOut_sign (io_rawOut_sign_0), .io_rawOut_sExp (io_rawOut_sExp_0), .io_rawOut_sig (io_rawOut_sig_0) ); // @[DivSqrtRecFN_small.scala:446:15] assign io_inReady = 
io_inReady_0; // @[DivSqrtRecFN_small.scala:422:5] assign io_rawOutValid_div = io_rawOutValid_div_0; // @[DivSqrtRecFN_small.scala:422:5] assign io_rawOutValid_sqrt = io_rawOutValid_sqrt_0; // @[DivSqrtRecFN_small.scala:422:5] assign io_roundingModeOut = io_roundingModeOut_0; // @[DivSqrtRecFN_small.scala:422:5] assign io_invalidExc = io_invalidExc_0; // @[DivSqrtRecFN_small.scala:422:5] assign io_infiniteExc = io_infiniteExc_0; // @[DivSqrtRecFN_small.scala:422:5] assign io_rawOut_isNaN = io_rawOut_isNaN_0; // @[DivSqrtRecFN_small.scala:422:5] assign io_rawOut_isInf = io_rawOut_isInf_0; // @[DivSqrtRecFN_small.scala:422:5] assign io_rawOut_isZero = io_rawOut_isZero_0; // @[DivSqrtRecFN_small.scala:422:5] assign io_rawOut_sign = io_rawOut_sign_0; // @[DivSqrtRecFN_small.scala:422:5] assign io_rawOut_sExp = io_rawOut_sExp_0; // @[DivSqrtRecFN_small.scala:422:5] assign io_rawOut_sig = io_rawOut_sig_0; // @[DivSqrtRecFN_small.scala:422:5] endmodule
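The wrapper above recodes its operands with rawFloatFromRecFN and delegates the iterative work to DivSqrtRawFN_small, which retires one quotient or root bit per cycle (two per cycle when divSqrtOpt_twoBitsPerCycle is set in options). The sketch below shows one way the rounded wrapper DivSqrtRecFN_small could be driven; it assumes the hardfloat helpers recFNFromFN/fNFromRecFN and the consts rounding-mode constants, and the module name DivExample and its ports are illustrative only, not taken from the sources above.

import chisel3._
import chisel3.util._
import hardfloat._

class DivExample extends Module {
  val io = IO(new Bundle {
    val a   = Flipped(Decoupled(UInt(32.W))) // IEEE-754 single-precision dividend
    val b   = Input(UInt(32.W))              // divisor, held by the caller until accepted
    val out = Valid(UInt(32.W))              // quotient, valid for one cycle
  })

  // Same parameterization as the e8_s24 instance above; options = 0 selects one bit per cycle.
  val div = Module(new DivSqrtRecFN_small(8, 24, 0))
  div.io.inValid        := io.a.valid
  io.a.ready            := div.io.inReady
  div.io.sqrtOp         := false.B                   // divide, not square root
  div.io.a              := recFNFromFN(8, 24, io.a.bits)
  div.io.b              := recFNFromFN(8, 24, io.b)
  div.io.roundingMode   := consts.round_near_even
  div.io.detectTininess := consts.tininess_afterRounding

  io.out.valid := div.io.outValid_div
  io.out.bits  := fNFromRecFN(8, 24, div.io.out)     // back to standard IEEE format
}

Because the operands are captured on the inReady && inValid handshake, the caller only needs to hold them until that cycle; the divider wiring in Arithmetic.scala later in this document drives the same interface in the same way.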
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_362( // @[SynchronizerReg.scala:68:19]
  input  clock, // @[SynchronizerReg.scala:68:19]
  input  reset, // @[SynchronizerReg.scala:68:19]
  input  io_d, // @[ShiftReg.scala:36:14]
  output io_q // @[ShiftReg.scala:36:14]
);

  wire io_d_0 = io_d; // @[SynchronizerReg.scala:68:19]
  wire _sync_2_T = io_d_0; // @[SynchronizerReg.scala:54:22, :68:19]
  wire io_q_0; // @[SynchronizerReg.scala:68:19]
  reg  sync_0; // @[SynchronizerReg.scala:51:87]
  assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19]
  reg  sync_1; // @[SynchronizerReg.scala:51:87]
  reg  sync_2; // @[SynchronizerReg.scala:51:87]
  always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19]
    if (reset) begin // @[SynchronizerReg.scala:68:19]
      sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87]
      sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87]
      sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87]
    end
    else begin // @[SynchronizerReg.scala:68:19]
      sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87]
      sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87]
      sync_2 <= _sync_2_T; // @[SynchronizerReg.scala:51:87, :54:22]
    end
  end // always @(posedge, posedge)
  assign io_q = io_q_0; // @[SynchronizerReg.scala:68:19]
endmodule
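The module above is the three-deep, asynchronously reset flop chain that SynchronizerPrimitiveShiftReg elaborates for a single bit; the AsyncResetSynchronizerShiftReg companion object instantiates one such chain per input bit. A short usage sketch follows, based on the apply signature shown in SynchronizerReg.scala; the module name SyncExample and its ports are made up for illustration.

import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

class SyncExample extends Module {
  val io = IO(new Bundle {
    val in  = Input(Bool())   // level signal launched from another clock domain
    val out = Output(Bool())  // metastability-hardened copy, safe to sample in this domain
  })
  // Three flops deep, reset value 0, with a suggested instance name for the backend flow.
  io.out := AsyncResetSynchronizerShiftReg(io.in, sync = 3, init = 0, name = Some("sig_sync"))
}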
Generate the Verilog code corresponding to the following Chisel files. File PE.scala: // See README.md for license details. package gemmini import chisel3._ import chisel3.util._ class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle { val dataflow = UInt(1.W) // TODO make this an Enum val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)? val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats } class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module { import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(inputType) val in_c = Input(cType) val out_d = Output(dType) }) io.out_d := io.in_c.mac(io.in_a, io.in_b) } // TODO update documentation /** * A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh. * @param width Data width of operands */ class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int) (implicit ev: Arithmetic[T]) extends Module { // Debugging variables import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(outputType) val in_d = Input(outputType) val out_a = Output(inputType) val out_b = Output(outputType) val out_c = Output(outputType) val in_control = Input(new PEControl(accType)) val out_control = Output(new PEControl(accType)) val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W)) val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W)) val in_last = Input(Bool()) val out_last = Output(Bool()) val in_valid = Input(Bool()) val out_valid = Output(Bool()) val bad_dataflow = Output(Bool()) }) val cType = if (df == Dataflow.WS) inputType else accType // When creating PEs that support multiple dataflows, the // elaboration/synthesis tools often fail to consolidate and de-duplicate // MAC units. To force mac circuitry to be re-used, we create a "mac_unit" // module here which just performs a single MAC operation val mac_unit = Module(new MacUnit(inputType, if (df == Dataflow.WS) outputType else accType, outputType)) val a = io.in_a val b = io.in_b val d = io.in_d val c1 = Reg(cType) val c2 = Reg(cType) val dataflow = io.in_control.dataflow val prop = io.in_control.propagate val shift = io.in_control.shift val id = io.in_id val last = io.in_last val valid = io.in_valid io.out_a := a io.out_control.dataflow := dataflow io.out_control.propagate := prop io.out_control.shift := shift io.out_id := id io.out_last := last io.out_valid := valid mac_unit.io.in_a := a val last_s = RegEnable(prop, valid) val flip = last_s =/= prop val shift_offset = Mux(flip, shift, 0.U) // Which dataflow are we using? val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W) val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W) // Is c1 being computed on, or propagated forward (in the output-stationary dataflow)? 
val COMPUTE = 0.U(1.W) val PROPAGATE = 1.U(1.W) io.bad_dataflow := false.B when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 c2 := mac_unit.io.out_d c1 := d.withWidthOf(cType) }.otherwise { io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c1 c1 := mac_unit.io.out_d c2 := d.withWidthOf(cType) } }.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := c1 mac_unit.io.in_b := c2.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c1 := d }.otherwise { io.out_c := c2 mac_unit.io.in_b := c1.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c2 := d } }.otherwise { io.bad_dataflow := true.B //assert(false.B, "unknown dataflow") io.out_c := DontCare io.out_b := DontCare mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 } when (!valid) { c1 := c1 c2 := c2 mac_unit.io.in_b := DontCare mac_unit.io.in_c := DontCare } } File Arithmetic.scala: // A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own: // implicit MyTypeArithmetic extends Arithmetic[MyType] { ... } package gemmini import chisel3._ import chisel3.util._ import hardfloat._ // Bundles that represent the raw bits of custom datatypes case class Float(expWidth: Int, sigWidth: Int) extends Bundle { val bits = UInt((expWidth + sigWidth).W) val bias: Int = (1 << (expWidth-1)) - 1 } case class DummySInt(w: Int) extends Bundle { val bits = UInt(w.W) def dontCare: DummySInt = { val o = Wire(new DummySInt(w)) o.bits := 0.U o } } // The Arithmetic typeclass which implements various arithmetic operations on custom datatypes abstract class Arithmetic[T <: Data] { implicit def cast(t: T): ArithmeticOps[T] } abstract class ArithmeticOps[T <: Data](self: T) { def *(t: T): T def mac(m1: T, m2: T): T // Returns (m1 * m2 + self) def +(t: T): T def -(t: T): T def >>(u: UInt): T // This is a rounding shift! Rounds away from 0 def >(t: T): Bool def identity: T def withWidthOf(t: T): T def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates def relu: T def zero: T def minimum: T // Optional parameters, which only need to be defined if you want to enable various optimizations for transformers def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None def mult_with_reciprocal[U <: Data](reciprocal: U) = self } object Arithmetic { implicit object UIntArithmetic extends Arithmetic[UInt] { override implicit def cast(self: UInt) = new ArithmeticOps(self) { override def *(t: UInt) = self * t override def mac(m1: UInt, m2: UInt) = m1 * m2 + self override def +(t: UInt) = self + t override def -(t: UInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? 
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = point_five & (zeros | ones_digit) (self >> u).asUInt + r } override def >(t: UInt): Bool = self > t override def withWidthOf(t: UInt) = self.asTypeOf(t) override def clippedToWidthOf(t: UInt) = { val sat = ((1 << (t.getWidth-1))-1).U Mux(self > sat, sat, self)(t.getWidth-1, 0) } override def relu: UInt = self override def zero: UInt = 0.U override def identity: UInt = 1.U override def minimum: UInt = 0.U } } implicit object SIntArithmetic extends Arithmetic[SInt] { override implicit def cast(self: SInt) = new ArithmeticOps(self) { override def *(t: SInt) = self * t override def mac(m1: SInt, m2: SInt) = m1 * m2 + self override def +(t: SInt) = self + t override def -(t: SInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = (point_five & (zeros | ones_digit)).asBool (self >> u).asSInt + Mux(r, 1.S, 0.S) } override def >(t: SInt): Bool = self > t override def withWidthOf(t: SInt) = { if (self.getWidth >= t.getWidth) self(t.getWidth-1, 0).asSInt else { val sign_bits = t.getWidth - self.getWidth val sign = self(self.getWidth-1) Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t) } } override def clippedToWidthOf(t: SInt): SInt = { val maxsat = ((1 << (t.getWidth-1))-1).S val minsat = (-(1 << (t.getWidth-1))).S MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt } override def relu: SInt = Mux(self >= 0.S, self, 0.S) override def zero: SInt = 0.S override def identity: SInt = 1.S override def minimum: SInt = (-(1 << (self.getWidth-1))).S override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(denom_t.cloneType)) val output = Wire(Decoupled(self.cloneType)) // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def sin_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def uin_to_float(x: UInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := x in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = 
sin_to_float(self) val denom_rec = uin_to_float(input.bits) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := self_rec divider.io.b := denom_rec divider.io.roundingMode := consts.round_minMag divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := float_to_in(divider.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(self.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) // Instantiate the hardloat sqrt val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0)) input.ready := sqrter.io.inReady sqrter.io.inValid := input.valid sqrter.io.sqrtOp := true.B sqrter.io.a := self_rec sqrter.io.b := DontCare sqrter.io.roundingMode := consts.round_minMag sqrter.io.detectTininess := consts.tininess_afterRounding output.valid := sqrter.io.outValid_sqrt output.bits := float_to_in(sqrter.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match { case Float(expWidth, sigWidth) => val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(u.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } val self_rec = in_to_float(self) val one_rec = in_to_float(1.S) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := one_rec divider.io.b := self_rec divider.io.roundingMode := consts.round_near_even divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u) assert(!output.valid || output.ready) Some((input, output)) case _ => None } override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match 
{ case recip @ Float(expWidth, sigWidth) => def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits) // Instantiate the hardloat divider val muladder = Module(new MulRecFN(expWidth, sigWidth)) muladder.io.roundingMode := consts.round_near_even muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := reciprocal_rec float_to_in(muladder.io.out) case _ => self } } } implicit object FloatArithmetic extends Arithmetic[Float] { // TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) { override def *(t: Float): Float = { val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := t_rec_resized val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def mac(m1: Float, m2: Float): Float = { // Recode all operands val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits) val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize m1 to self's width val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth)) m1_resizer.io.in := m1_rec m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m1_resizer.io.detectTininess := consts.tininess_afterRounding val m1_rec_resized = m1_resizer.io.out // Resize m2 to self's width val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth)) m2_resizer.io.in := m2_rec m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m2_resizer.io.detectTininess := consts.tininess_afterRounding val m2_rec_resized = m2_resizer.io.out // Perform multiply-add val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := m1_rec_resized muladder.io.b := m2_rec_resized muladder.io.c := self_rec // Convert result 
to standard format // TODO remove these intermediate recodings val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def +(t: Float): Float = { require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Generate 1 as a float val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := 1.U in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding val one_rec = in_to_rec_fn.io.out // Resize t val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out // Perform addition val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := t_rec_resized muladder.io.b := one_rec muladder.io.c := self_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def -(t: Float): Float = { val t_sgn = t.bits(t.getWidth-1) val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t) self + neg_t } override def >>(u: UInt): Float = { // Recode self val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Get 2^(-u) as a recoded float val shift_exp = Wire(UInt(self.expWidth.W)) shift_exp := self.bias.U - u val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W)) val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn) assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported") // Multiply self and 2^(-u) val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := shift_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def >(t: Float): Bool = { // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize t to self's width val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth)) comparator.io.a := self_rec comparator.io.b := t_rec_resized comparator.io.signaling := false.B comparator.io.gt } override def withWidthOf(t: Float): Float = { val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even 
// consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def clippedToWidthOf(t: Float): Float = { // TODO check for overflow. Right now, we just assume that overflow doesn't happen val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def relu: Float = { val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits) val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits) result } override def zero: Float = 0.U.asTypeOf(self) override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) } } implicit object DummySIntArithmetic extends Arithmetic[DummySInt] { override implicit def cast(self: DummySInt) = new ArithmeticOps(self) { override def *(t: DummySInt) = self.dontCare override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare override def +(t: DummySInt) = self.dontCare override def -(t: DummySInt) = self.dontCare override def >>(t: UInt) = self.dontCare override def >(t: DummySInt): Bool = false.B override def identity = self.dontCare override def withWidthOf(t: DummySInt) = self.dontCare override def clippedToWidthOf(t: DummySInt) = self.dontCare override def relu = self.dontCare override def zero = self.dontCare override def minimum: DummySInt = self.dontCare } } }
module MacUnit_150( // @[PE.scala:14:7] input clock, // @[PE.scala:14:7] input reset, // @[PE.scala:14:7] input [7:0] io_in_a, // @[PE.scala:16:14] input [7:0] io_in_b, // @[PE.scala:16:14] input [19:0] io_in_c, // @[PE.scala:16:14] output [19:0] io_out_d // @[PE.scala:16:14] ); wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:14:7] wire [7:0] io_in_b_0 = io_in_b; // @[PE.scala:14:7] wire [19:0] io_in_c_0 = io_in_c; // @[PE.scala:14:7] wire [19:0] _io_out_d_T_3; // @[Arithmetic.scala:93:54] wire [19:0] io_out_d_0; // @[PE.scala:14:7] wire [15:0] _io_out_d_T = {{8{io_in_a_0[7]}}, io_in_a_0} * {{8{io_in_b_0[7]}}, io_in_b_0}; // @[PE.scala:14:7] wire [20:0] _io_out_d_T_1 = {{5{_io_out_d_T[15]}}, _io_out_d_T} + {io_in_c_0[19], io_in_c_0}; // @[PE.scala:14:7] wire [19:0] _io_out_d_T_2 = _io_out_d_T_1[19:0]; // @[Arithmetic.scala:93:54] assign _io_out_d_T_3 = _io_out_d_T_2; // @[Arithmetic.scala:93:54] assign io_out_d_0 = _io_out_d_T_3; // @[PE.scala:14:7] assign io_out_d = io_out_d_0; // @[PE.scala:14:7] endmodule
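The datapath above is the SInt mac from Arithmetic.scala: both 8-bit operands are sign-extended, multiplied, added to the 20-bit accumulator input, and the sum is cut down to the 20-bit output type. The sketch below shows one way the same instance could be elaborated stand-alone; the object name MacUnitMain and the use of circt.stage.ChiselStage.emitSystemVerilog are assumptions about the build setup, not taken from the sources above.

import chisel3._
import circt.stage.ChiselStage
import gemmini._

object MacUnitMain extends App {
  // 8-bit inputType, 20-bit cType and dType, matching MacUnit_150; the implicit
  // Arithmetic[SInt] instance is resolved from the Arithmetic companion object.
  println(ChiselStage.emitSystemVerilog(new MacUnit(SInt(8.W), SInt(20.W), SInt(20.W))))
}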
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File AsyncQueue.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ case class AsyncQueueParams( depth: Int = 8, sync: Int = 3, safe: Boolean = true, // If safe is true, then effort is made to resynchronize the crossing indices when either side is reset. // This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty. narrow: Boolean = false) // If narrow is true then the read mux is moved to the source side of the crossing. // This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing, // at the expense of a combinational path from the sink to the source and back to the sink. { require (depth > 0 && isPow2(depth)) require (sync >= 2) val bits = log2Ceil(depth) val wires = if (narrow) 1 else depth } object AsyncQueueParams { // When there is only one entry, we don't need narrow. 
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false) } class AsyncBundleSafety extends Bundle { val ridx_valid = Input (Bool()) val widx_valid = Output(Bool()) val source_reset_n = Output(Bool()) val sink_reset_n = Input (Bool()) } class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle { // Data-path synchronization val mem = Output(Vec(params.wires, gen)) val ridx = Input (UInt((params.bits+1).W)) val widx = Output(UInt((params.bits+1).W)) val index = params.narrow.option(Input(UInt(params.bits.W))) // Signals used to self-stabilize a safe AsyncQueue val safe = params.safe.option(new AsyncBundleSafety) } object GrayCounter { def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = { val incremented = Wire(UInt(bits.W)) val binary = RegNext(next=incremented, init=0.U).suggestName(name) incremented := Mux(clear, 0.U, binary + increment.asUInt) incremented ^ (incremented >> 1) } } class AsyncValidSync(sync: Int, desc: String) extends RawModule { val io = IO(new Bundle { val in = Input(Bool()) val out = Output(Bool()) }) val clock = IO(Input(Clock())) val reset = IO(Input(AsyncReset())) withClockAndReset(clock, reset){ io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc)) } } class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSource_${gen.typeName}" val io = IO(new Bundle { // These come from the source domain val enq = Flipped(Decoupled(gen)) // These cross to the sink clock domain val async = new AsyncBundle(gen, params) }) val bits = params.bits val sink_ready = WireInit(true.B) val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all. 
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin")) val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray")) val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U) val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1)) when (io.enq.fire) { mem(index) := io.enq.bits } val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg")) io.enq.ready := ready_reg && sink_ready val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray")) io.async.widx := widx_reg io.async.index match { case Some(index) => io.async.mem(0) := mem(index) case None => io.async.mem := mem } io.async.safe.foreach { sio => val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0")) val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1")) val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend")) val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid")) source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_valid .reset := reset.asAsyncReset source_valid_0.clock := clock source_valid_1.clock := clock sink_extend .clock := clock sink_valid .clock := clock source_valid_0.io.in := true.B source_valid_1.io.in := source_valid_0.io.out sio.widx_valid := source_valid_1.io.out sink_extend.io.in := sio.ridx_valid sink_valid.io.in := sink_extend.io.out sink_ready := sink_valid.io.out sio.source_reset_n := !reset.asBool // Assert that if there is stuff in the queue, then reset cannot happen // Impossible to write because dequeue can occur on the receiving side, // then reset allowed to happen, but write side cannot know that dequeue // occurred. 
// TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected") // assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty") } } class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSink_${gen.typeName}" val io = IO(new Bundle { // These come from the sink domain val deq = Decoupled(gen) // These cross to the source clock domain val async = Flipped(new AsyncBundle(gen, params)) }) val bits = params.bits val source_ready = WireInit(true.B) val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin")) val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray")) val valid = source_ready && ridx =/= widx // The mux is safe because timing analysis ensures ridx has reached the register // On an ASIC, changes to the unread location cannot affect the selected value // On an FPGA, only one input changes at a time => mem updates don't cause glitches // The register only latches when the selected valued is not being written val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1)) io.async.index.foreach { _ := index } // This register does not NEED to be reset, as its contents will not // be considered unless the asynchronously reset deq valid register is set. // It is possible that bits latches when the source domain is reset / has power cut // This is safe, because isolation gates brought mem low before the zeroed widx reached us val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index) io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg")) val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg")) io.deq.valid := valid_reg && source_ready val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray")) io.async.ridx := ridx_reg io.async.safe.foreach { sio => val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0")) val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1")) val source_extend = Module(new AsyncValidSync(params.sync, "source_extend")) val source_valid = Module(new AsyncValidSync(params.sync, "source_valid")) sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_valid .reset := reset.asAsyncReset sink_valid_0 .clock := clock sink_valid_1 .clock := clock source_extend.clock := clock source_valid .clock := clock sink_valid_0.io.in := true.B sink_valid_1.io.in := sink_valid_0.io.out sio.ridx_valid := sink_valid_1.io.out source_extend.io.in := sio.widx_valid source_valid.io.in := source_extend.io.out source_ready := source_valid.io.out sio.sink_reset_n := !reset.asBool // TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // // val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool // val reset_and_extend_prev = RegNext(reset_and_extend, true.B) // val reset_rise = !reset_and_extend_prev && reset_and_extend // val prev_idx_match = 
AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0) // assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty") } } object FromAsyncBundle { // Sometimes it makes sense for the sink to have different sync than the source def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync) def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = { val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync))) sink.io.async <> x sink.io.deq } } object ToAsyncBundle { def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = { val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params)) source.io.enq <> x source.io.async } } class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] { val io = IO(new CrossingIO(gen)) val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) } val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) } source.io.enq <> io.enq io.deq <> sink.io.deq sink.io.async <> source.io.async }
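Before the generated synchronizer module that follows, a brief note on how the pieces in AsyncQueue.scala are meant to be combined: ToAsyncBundle wraps an AsyncQueueSource around a ready/valid producer, FromAsyncBundle wraps an AsyncQueueSink for the consumer, and the gray-coded widx/ridx indices cross between the two clock domains through AsyncResetSynchronizerShiftReg instances. A minimal usage sketch, assuming an enclosing module with an explicit sink clock and reset (the module and signal names here are illustrative, not part of the sources above):

import chisel3._
import chisel3.util._
import freechips.rocketchip.util._

// Sketch only: crosses a Decoupled[UInt] from this module's implicit clock
// domain into a separate deq clock domain using the helpers defined above.
class CrossingExample extends Module {
  val io = IO(new Bundle {
    val enq       = Flipped(Decoupled(UInt(8.W)))
    val deq_clock = Input(Clock())
    val deq_reset = Input(Bool())
    val deq       = Decoupled(UInt(8.W))
  })
  // Source half lives in this module's clock domain.
  val async = ToAsyncBundle(io.enq, AsyncQueueParams(depth = 8, sync = 3))
  // Sink half lives in the destination domain; it resynchronizes the
  // gray-coded write index before presenting io.deq.
  withClockAndReset(io.deq_clock, io.deq_reset) {
    io.deq <> FromAsyncBundle(async)
  }
}

The AsyncQueue class at the end of the file packages the same wiring, taking both clock/reset pairs through its CrossingIO instead of relying on the surrounding module's domains.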
module AsyncValidSync_97( // @[AsyncQueue.scala:58:7] input io_in, // @[AsyncQueue.scala:59:14] output io_out, // @[AsyncQueue.scala:59:14] input clock, // @[AsyncQueue.scala:63:17] input reset // @[AsyncQueue.scala:64:17] ); wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7] wire _io_out_WIRE; // @[ShiftReg.scala:48:24] wire io_out_0; // @[AsyncQueue.scala:58:7] assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24] AsyncResetSynchronizerShiftReg_w1_d3_i0_108 io_out_source_valid_1 ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (reset), .io_d (io_in_0), // @[AsyncQueue.scala:58:7] .io_q (_io_out_WIRE) ); // @[ShiftReg.scala:45:23] assign io_out = io_out_0; // @[AsyncQueue.scala:58:7] endmodule
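AsyncValidSync_97 above is the elaborated form of the AsyncValidSync class from AsyncQueue.scala: a RawModule that forwards io.in through a 3-deep asynchronously reset synchronizer (the referenced AsyncResetSynchronizerShiftReg_w1_d3_i0_108 instance, whose body is not shown in this excerpt). To reproduce output like this, the Chisel sources are typically driven through ChiselStage; the exact emitter API differs between Chisel releases, so the following is a sketch rather than a verified build script:

import circt.stage.ChiselStage
import freechips.rocketchip.util.AsyncValidSync

// Sketch: emit SystemVerilog for one AsyncValidSync instance. Numeric module
// suffixes such as _97 come from deduplication during elaboration of the full
// design, not from a standalone run like this one.
object EmitAsyncValidSync extends App {
  ChiselStage.emitSystemVerilogFile(
    new AsyncValidSync(sync = 3, desc = "source_valid_0"),
    args = Array("--target-dir", "generated")
  )
}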
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
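// Editorial note (illustrative sketch, not part of the original source): the dynamic
// rotateRight below is a log2-depth network of fixed rotates, one stage per bit of the
// zero-padded amount `amt`; stage i conditionally applies a fixed rotateRight(1 << i).
// For an 8-bit value rotated by n = 5 (binary 101), stages 0 and 2 are taken and stage 1
// is skipped, for a total rotation of 1 + 4 = 5. A hypothetical use inside a module
// (with `import freechips.rocketchip.util._` in scope for the implicit class):
//   val x = "h96".U(8.W)        // 1001_0110
//   val y = x.rotateRight(5.U)  // 1011_0100, i.e. "hb4".U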
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
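// Editorial note (hypothetical example, not in the original source): heterogeneousOrGlobalSetting
// either broadcasts a single global value to n entries or passes a per-entry Seq of
// length n through unchanged; any other length falls into the exception case below.
//   heterogeneousOrGlobalSetting(Seq(4), 3)       // Seq(4, 4, 4)
//   heterogeneousOrGlobalSetting(Seq(1, 2, 3), 3) // Seq(1, 2, 3)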
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Bundles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import freechips.rocketchip.util._ import scala.collection.immutable.ListMap import chisel3.util.Decoupled import chisel3.util.DecoupledIO import chisel3.reflect.DataMirror abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle // common combos in lazy policy: // Put + Acquire // Release + AccessAck object TLMessages { // A B C D E def PutFullData = 0.U // . . => AccessAck def PutPartialData = 1.U // . . => AccessAck def ArithmeticData = 2.U // . . => AccessAckData def LogicalData = 3.U // . . => AccessAckData def Get = 4.U // . . => AccessAckData def Hint = 5.U // . . => HintAck def AcquireBlock = 6.U // . => Grant[Data] def AcquirePerm = 7.U // . => Grant[Data] def Probe = 6.U // . => ProbeAck[Data] def AccessAck = 0.U // . . def AccessAckData = 1.U // . . def HintAck = 2.U // . . def ProbeAck = 4.U // . def ProbeAckData = 5.U // . def Release = 6.U // . => ReleaseAck def ReleaseData = 7.U // . => ReleaseAck def Grant = 4.U // . => GrantAck def GrantData = 5.U // . => GrantAck def ReleaseAck = 6.U // . def GrantAck = 0.U // . def isA(x: UInt) = x <= AcquirePerm def isB(x: UInt) = x <= Probe def isC(x: UInt) = x <= ReleaseData def isD(x: UInt) = x <= ReleaseAck def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant) def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck) def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("AcquireBlock",TLPermissions.PermMsgGrow), ("AcquirePerm",TLPermissions.PermMsgGrow)) def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("Probe",TLPermissions.PermMsgCap)) def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("ProbeAck",TLPermissions.PermMsgReport), ("ProbeAckData",TLPermissions.PermMsgReport), ("Release",TLPermissions.PermMsgReport), ("ReleaseData",TLPermissions.PermMsgReport)) def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("Grant",TLPermissions.PermMsgCap), ("GrantData",TLPermissions.PermMsgCap), ("ReleaseAck",TLPermissions.PermMsgReserved)) } /** * The three primary TileLink permissions are: * (T)runk: the agent is (or is on inwards path to) the global point of 
serialization. * (B)ranch: the agent is on an outwards path to * (N)one: * These permissions are permuted by transfer operations in various ways. * Operations can cap permissions, request for them to be grown or shrunk, * or for a report on their current status. */ object TLPermissions { val aWidth = 2 val bdWidth = 2 val cWidth = 3 // Cap types (Grant = new permissions, Probe = permisions <= target) def toT = 0.U(bdWidth.W) def toB = 1.U(bdWidth.W) def toN = 2.U(bdWidth.W) def isCap(x: UInt) = x <= toN // Grow types (Acquire = permissions >= target) def NtoB = 0.U(aWidth.W) def NtoT = 1.U(aWidth.W) def BtoT = 2.U(aWidth.W) def isGrow(x: UInt) = x <= BtoT // Shrink types (ProbeAck, Release) def TtoB = 0.U(cWidth.W) def TtoN = 1.U(cWidth.W) def BtoN = 2.U(cWidth.W) def isShrink(x: UInt) = x <= BtoN // Report types (ProbeAck, Release) def TtoT = 3.U(cWidth.W) def BtoB = 4.U(cWidth.W) def NtoN = 5.U(cWidth.W) def isReport(x: UInt) = x <= NtoN def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT") def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN") def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TotT", "Report BtoB", "Report NtoN") def PermMsgReserved:Seq[String] = Seq("Reserved") } object TLAtomics { val width = 3 // Arithmetic types def MIN = 0.U(width.W) def MAX = 1.U(width.W) def MINU = 2.U(width.W) def MAXU = 3.U(width.W) def ADD = 4.U(width.W) def isArithmetic(x: UInt) = x <= ADD // Logical types def XOR = 0.U(width.W) def OR = 1.U(width.W) def AND = 2.U(width.W) def SWAP = 3.U(width.W) def isLogical(x: UInt) = x <= SWAP def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD") def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP") } object TLHints { val width = 1 def PREFETCH_READ = 0.U(width.W) def PREFETCH_WRITE = 1.U(width.W) def isHints(x: UInt) = x <= PREFETCH_WRITE def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite") } sealed trait TLChannel extends TLBundleBase { val channelName: String } sealed trait TLDataChannel extends TLChannel sealed trait TLAddrChannel extends TLDataChannel final class TLBundleA(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleA_${params.shortName}" val channelName = "'A' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleB(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleB_${params.shortName}" val channelName = "'B' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val address = UInt(params.addressBits.W) // from // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleC(params: TLBundleParameters) extends TLBundleBase(params) with 
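// Editorial note (illustrative, not part of the original source): the 'C' channel bundle
// declared here carries the shrink/report encodings from TLPermissions above. For example,
// a cache evicting a dirty line would send opcode = TLMessages.ReleaseData with
// param = TLPermissions.TtoN, while a clean line probed toB (keeping read permission)
// would answer with opcode = TLMessages.ProbeAck and param = TLPermissions.BtoB.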
TLAddrChannel { override def typeName = s"TLBundleC_${params.shortName}" val channelName = "'C' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.cWidth.W) // shrink or report perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleD(params: TLBundleParameters) extends TLBundleBase(params) with TLDataChannel { override def typeName = s"TLBundleD_${params.shortName}" val channelName = "'D' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val sink = UInt(params.sinkBits.W) // from val denied = Bool() // implies corrupt iff *Data val user = BundleMap(params.responseFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleE(params: TLBundleParameters) extends TLBundleBase(params) with TLChannel { override def typeName = s"TLBundleE_${params.shortName}" val channelName = "'E' channel" val sink = UInt(params.sinkBits.W) // to } class TLBundle(val params: TLBundleParameters) extends Record { // Emulate a Bundle with elements abcde or ad depending on params.hasBCE private val optA = Some (Decoupled(new TLBundleA(params))) private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params)))) private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params))) private val optD = Some (Flipped(Decoupled(new TLBundleD(params)))) private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params))) def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params))))) def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params))))) def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params))))) def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params))))) def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params))))) val elements = if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a) else ListMap("d" -> d, "a" -> a) def tieoff(): Unit = { DataMirror.specifiedDirectionOf(a.ready) match { case SpecifiedDirection.Input => a.ready := false.B c.ready := false.B e.ready := false.B b.valid := false.B d.valid := false.B case SpecifiedDirection.Output => a.valid := false.B c.valid := false.B e.valid := false.B b.ready := false.B d.ready := false.B case _ => } } } object TLBundle { def apply(params: TLBundleParameters) = new TLBundle(params) } class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params) { val a = new AsyncBundle(new TLBundleA(params.base), params.async) val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async)) val c = new AsyncBundle(new TLBundleC(params.base), params.async) val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async)) val e = new AsyncBundle(new 
TLBundleE(params.base), params.async) } class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = RationalIO(new TLBundleA(params)) val b = Flipped(RationalIO(new TLBundleB(params))) val c = RationalIO(new TLBundleC(params)) val d = Flipped(RationalIO(new TLBundleD(params))) val e = RationalIO(new TLBundleE(params)) } class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = CreditedIO(new TLBundleA(params)) val b = Flipped(CreditedIO(new TLBundleB(params))) val c = CreditedIO(new TLBundleC(params)) val d = Flipped(CreditedIO(new TLBundleD(params))) val e = CreditedIO(new TLBundleE(params)) } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.nodes._ import freechips.rocketchip.diplomacy.{ AddressDecoder, AddressSet, BufferParams, DirectedBuffers, IdMap, IdMapEntry, IdRange, RegionType, TransferSizes } import freechips.rocketchip.resources.{Resource, ResourceAddress, ResourcePermissions} import freechips.rocketchip.util.{ AsyncQueueParams, BundleField, BundleFieldBase, BundleKeyBase, CreditedDelay, groupByIntoSeq, RationalDirection, SimpleProduct } import scala.math.max //These transfer sizes describe requests issued from masters on the A channel that will be responded by slaves on the D channel case class TLMasterToSlaveTransferSizes( // Supports both Acquire+Release of the following two sizes: acquireT: TransferSizes = TransferSizes.none, acquireB: TransferSizes = TransferSizes.none, arithmetic: TransferSizes = TransferSizes.none, logical: TransferSizes = TransferSizes.none, get: TransferSizes = TransferSizes.none, putFull: TransferSizes = TransferSizes.none, putPartial: TransferSizes = TransferSizes.none, hint: TransferSizes = TransferSizes.none) extends TLCommonTransferSizes { def intersect(rhs: TLMasterToSlaveTransferSizes) = TLMasterToSlaveTransferSizes( acquireT = acquireT .intersect(rhs.acquireT), acquireB = acquireB .intersect(rhs.acquireB), arithmetic = arithmetic.intersect(rhs.arithmetic), logical = logical .intersect(rhs.logical), get = get .intersect(rhs.get), putFull = putFull .intersect(rhs.putFull), putPartial = putPartial.intersect(rhs.putPartial), hint = hint .intersect(rhs.hint)) def mincover(rhs: TLMasterToSlaveTransferSizes) = TLMasterToSlaveTransferSizes( acquireT = acquireT .mincover(rhs.acquireT), acquireB = acquireB .mincover(rhs.acquireB), arithmetic = arithmetic.mincover(rhs.arithmetic), logical = logical .mincover(rhs.logical), get = get .mincover(rhs.get), putFull = putFull .mincover(rhs.putFull), putPartial = putPartial.mincover(rhs.putPartial), hint = hint .mincover(rhs.hint)) // Reduce rendering to a simple yes/no per field override def toString = { def str(x: TransferSizes, flag: String) = if (x.none) "" else flag def flags = Vector( str(acquireT, "T"), str(acquireB, "B"), str(arithmetic, "A"), str(logical, "L"), str(get, "G"), str(putFull, "F"), str(putPartial, "P"), str(hint, "H")) flags.mkString } // Prints out the actual information in a user readable way def infoString = { s"""acquireT = ${acquireT} |acquireB = ${acquireB} |arithmetic = ${arithmetic} |logical = ${logical} |get = ${get} |putFull = ${putFull} |putPartial = ${putPartial} |hint = ${hint} | |""".stripMargin } } object TLMasterToSlaveTransferSizes { def unknownEmits = TLMasterToSlaveTransferSizes( acquireT = TransferSizes(1, 
4096), acquireB = TransferSizes(1, 4096), arithmetic = TransferSizes(1, 4096), logical = TransferSizes(1, 4096), get = TransferSizes(1, 4096), putFull = TransferSizes(1, 4096), putPartial = TransferSizes(1, 4096), hint = TransferSizes(1, 4096)) def unknownSupports = TLMasterToSlaveTransferSizes() } //These transfer sizes describe requests issued from slaves on the B channel that will be responded by masters on the C channel case class TLSlaveToMasterTransferSizes( probe: TransferSizes = TransferSizes.none, arithmetic: TransferSizes = TransferSizes.none, logical: TransferSizes = TransferSizes.none, get: TransferSizes = TransferSizes.none, putFull: TransferSizes = TransferSizes.none, putPartial: TransferSizes = TransferSizes.none, hint: TransferSizes = TransferSizes.none ) extends TLCommonTransferSizes { def intersect(rhs: TLSlaveToMasterTransferSizes) = TLSlaveToMasterTransferSizes( probe = probe .intersect(rhs.probe), arithmetic = arithmetic.intersect(rhs.arithmetic), logical = logical .intersect(rhs.logical), get = get .intersect(rhs.get), putFull = putFull .intersect(rhs.putFull), putPartial = putPartial.intersect(rhs.putPartial), hint = hint .intersect(rhs.hint) ) def mincover(rhs: TLSlaveToMasterTransferSizes) = TLSlaveToMasterTransferSizes( probe = probe .mincover(rhs.probe), arithmetic = arithmetic.mincover(rhs.arithmetic), logical = logical .mincover(rhs.logical), get = get .mincover(rhs.get), putFull = putFull .mincover(rhs.putFull), putPartial = putPartial.mincover(rhs.putPartial), hint = hint .mincover(rhs.hint) ) // Reduce rendering to a simple yes/no per field override def toString = { def str(x: TransferSizes, flag: String) = if (x.none) "" else flag def flags = Vector( str(probe, "P"), str(arithmetic, "A"), str(logical, "L"), str(get, "G"), str(putFull, "F"), str(putPartial, "P"), str(hint, "H")) flags.mkString } // Prints out the actual information in a user readable way def infoString = { s"""probe = ${probe} |arithmetic = ${arithmetic} |logical = ${logical} |get = ${get} |putFull = ${putFull} |putPartial = ${putPartial} |hint = ${hint} | |""".stripMargin } } object TLSlaveToMasterTransferSizes { def unknownEmits = TLSlaveToMasterTransferSizes( arithmetic = TransferSizes(1, 4096), logical = TransferSizes(1, 4096), get = TransferSizes(1, 4096), putFull = TransferSizes(1, 4096), putPartial = TransferSizes(1, 4096), hint = TransferSizes(1, 4096), probe = TransferSizes(1, 4096)) def unknownSupports = TLSlaveToMasterTransferSizes() } trait TLCommonTransferSizes { def arithmetic: TransferSizes def logical: TransferSizes def get: TransferSizes def putFull: TransferSizes def putPartial: TransferSizes def hint: TransferSizes } class TLSlaveParameters private( val nodePath: Seq[BaseNode], val resources: Seq[Resource], setName: Option[String], val address: Seq[AddressSet], val regionType: RegionType.T, val executable: Boolean, val fifoId: Option[Int], val supports: TLMasterToSlaveTransferSizes, val emits: TLSlaveToMasterTransferSizes, // By default, slaves are forbidden from issuing 'denied' responses (it prevents Fragmentation) val alwaysGrantsT: Boolean, // typically only true for CacheCork'd read-write devices; dual: neverReleaseData // If fifoId=Some, all accesses sent to the same fifoId are executed and ACK'd in FIFO order // Note: you can only rely on this FIFO behaviour if your TLMasterParameters include requestFifo val mayDenyGet: Boolean, // applies to: AccessAckData, GrantData val mayDenyPut: Boolean) // applies to: AccessAck, Grant, HintAck // ReleaseAck may NEVER be denied 
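// Editorial note (hypothetical sketch, not part of the original source): a simple 4 KiB
// read/write device slave might be described with the v1 factory defined further below:
//   TLSlaveParameters.v1(
//     address            = Seq(AddressSet(0x10000, 0xfff)),
//     regionType         = RegionType.UNCACHED,
//     supportsGet        = TransferSizes(1, 8),
//     supportsPutFull    = TransferSizes(1, 8),
//     supportsPutPartial = TransferSizes(1, 8),
//     fifoId             = Some(0))
// mayDenyGet/mayDenyPut default to false, i.e. such a device promises never to return
// denied AccessAck/AccessAckData responses.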
extends SimpleProduct { def sortedAddress = address.sorted override def canEqual(that: Any): Boolean = that.isInstanceOf[TLSlaveParameters] override def productPrefix = "TLSlaveParameters" // We intentionally omit nodePath for equality testing / formatting def productArity: Int = 11 def productElement(n: Int): Any = n match { case 0 => name case 1 => address case 2 => resources case 3 => regionType case 4 => executable case 5 => fifoId case 6 => supports case 7 => emits case 8 => alwaysGrantsT case 9 => mayDenyGet case 10 => mayDenyPut case _ => throw new IndexOutOfBoundsException(n.toString) } def supportsAcquireT: TransferSizes = supports.acquireT def supportsAcquireB: TransferSizes = supports.acquireB def supportsArithmetic: TransferSizes = supports.arithmetic def supportsLogical: TransferSizes = supports.logical def supportsGet: TransferSizes = supports.get def supportsPutFull: TransferSizes = supports.putFull def supportsPutPartial: TransferSizes = supports.putPartial def supportsHint: TransferSizes = supports.hint require (!address.isEmpty, "Address cannot be empty") address.foreach { a => require (a.finite, "Address must be finite") } address.combinations(2).foreach { case Seq(x,y) => require (!x.overlaps(y), s"$x and $y overlap.") } require (supportsPutFull.contains(supportsPutPartial), s"PutFull($supportsPutFull) < PutPartial($supportsPutPartial)") require (supportsPutFull.contains(supportsArithmetic), s"PutFull($supportsPutFull) < Arithmetic($supportsArithmetic)") require (supportsPutFull.contains(supportsLogical), s"PutFull($supportsPutFull) < Logical($supportsLogical)") require (supportsGet.contains(supportsArithmetic), s"Get($supportsGet) < Arithmetic($supportsArithmetic)") require (supportsGet.contains(supportsLogical), s"Get($supportsGet) < Logical($supportsLogical)") require (supportsAcquireB.contains(supportsAcquireT), s"AcquireB($supportsAcquireB) < AcquireT($supportsAcquireT)") require (!alwaysGrantsT || supportsAcquireT, s"Must supportAcquireT if promising to always grantT") // Make sure that the regionType agrees with the capabilities require (!supportsAcquireB || regionType >= RegionType.UNCACHED) // acquire -> uncached, tracked, cached require (regionType <= RegionType.UNCACHED || supportsAcquireB) // tracked, cached -> acquire require (regionType != RegionType.UNCACHED || supportsGet) // uncached -> supportsGet val name = setName.orElse(nodePath.lastOption.map(_.lazyModule.name)).getOrElse("disconnected") val maxTransfer = List( // Largest supported transfer of all types supportsAcquireT.max, supportsAcquireB.max, supportsArithmetic.max, supportsLogical.max, supportsGet.max, supportsPutFull.max, supportsPutPartial.max).max val maxAddress = address.map(_.max).max val minAlignment = address.map(_.alignment).min // The device had better not support a transfer larger than its alignment require (minAlignment >= maxTransfer, s"Bad $address: minAlignment ($minAlignment) must be >= maxTransfer ($maxTransfer)") def toResource: ResourceAddress = { ResourceAddress(address, ResourcePermissions( r = supportsAcquireB || supportsGet, w = supportsAcquireT || supportsPutFull, x = executable, c = supportsAcquireB, a = supportsArithmetic && supportsLogical)) } def findTreeViolation() = nodePath.find { case _: MixedAdapterNode[_, _, _, _, _, _, _, _] => false case _: SinkNode[_, _, _, _, _] => false case node => node.inputs.size != 1 } def isTree = findTreeViolation() == None def infoString = { s"""Slave Name = ${name} |Slave Address = ${address} |supports = ${supports.infoString} | 
|""".stripMargin } def v1copy( address: Seq[AddressSet] = address, resources: Seq[Resource] = resources, regionType: RegionType.T = regionType, executable: Boolean = executable, nodePath: Seq[BaseNode] = nodePath, supportsAcquireT: TransferSizes = supports.acquireT, supportsAcquireB: TransferSizes = supports.acquireB, supportsArithmetic: TransferSizes = supports.arithmetic, supportsLogical: TransferSizes = supports.logical, supportsGet: TransferSizes = supports.get, supportsPutFull: TransferSizes = supports.putFull, supportsPutPartial: TransferSizes = supports.putPartial, supportsHint: TransferSizes = supports.hint, mayDenyGet: Boolean = mayDenyGet, mayDenyPut: Boolean = mayDenyPut, alwaysGrantsT: Boolean = alwaysGrantsT, fifoId: Option[Int] = fifoId) = { new TLSlaveParameters( setName = setName, address = address, resources = resources, regionType = regionType, executable = executable, nodePath = nodePath, supports = TLMasterToSlaveTransferSizes( acquireT = supportsAcquireT, acquireB = supportsAcquireB, arithmetic = supportsArithmetic, logical = supportsLogical, get = supportsGet, putFull = supportsPutFull, putPartial = supportsPutPartial, hint = supportsHint), emits = emits, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut, alwaysGrantsT = alwaysGrantsT, fifoId = fifoId) } def v2copy( nodePath: Seq[BaseNode] = nodePath, resources: Seq[Resource] = resources, name: Option[String] = setName, address: Seq[AddressSet] = address, regionType: RegionType.T = regionType, executable: Boolean = executable, fifoId: Option[Int] = fifoId, supports: TLMasterToSlaveTransferSizes = supports, emits: TLSlaveToMasterTransferSizes = emits, alwaysGrantsT: Boolean = alwaysGrantsT, mayDenyGet: Boolean = mayDenyGet, mayDenyPut: Boolean = mayDenyPut) = { new TLSlaveParameters( nodePath = nodePath, resources = resources, setName = name, address = address, regionType = regionType, executable = executable, fifoId = fifoId, supports = supports, emits = emits, alwaysGrantsT = alwaysGrantsT, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut) } @deprecated("Use v1copy instead of copy","") def copy( address: Seq[AddressSet] = address, resources: Seq[Resource] = resources, regionType: RegionType.T = regionType, executable: Boolean = executable, nodePath: Seq[BaseNode] = nodePath, supportsAcquireT: TransferSizes = supports.acquireT, supportsAcquireB: TransferSizes = supports.acquireB, supportsArithmetic: TransferSizes = supports.arithmetic, supportsLogical: TransferSizes = supports.logical, supportsGet: TransferSizes = supports.get, supportsPutFull: TransferSizes = supports.putFull, supportsPutPartial: TransferSizes = supports.putPartial, supportsHint: TransferSizes = supports.hint, mayDenyGet: Boolean = mayDenyGet, mayDenyPut: Boolean = mayDenyPut, alwaysGrantsT: Boolean = alwaysGrantsT, fifoId: Option[Int] = fifoId) = { v1copy( address = address, resources = resources, regionType = regionType, executable = executable, nodePath = nodePath, supportsAcquireT = supportsAcquireT, supportsAcquireB = supportsAcquireB, supportsArithmetic = supportsArithmetic, supportsLogical = supportsLogical, supportsGet = supportsGet, supportsPutFull = supportsPutFull, supportsPutPartial = supportsPutPartial, supportsHint = supportsHint, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut, alwaysGrantsT = alwaysGrantsT, fifoId = fifoId) } } object TLSlaveParameters { def v1( address: Seq[AddressSet], resources: Seq[Resource] = Seq(), regionType: RegionType.T = RegionType.GET_EFFECTS, executable: Boolean = false, nodePath: Seq[BaseNode] = Seq(), 
supportsAcquireT: TransferSizes = TransferSizes.none, supportsAcquireB: TransferSizes = TransferSizes.none, supportsArithmetic: TransferSizes = TransferSizes.none, supportsLogical: TransferSizes = TransferSizes.none, supportsGet: TransferSizes = TransferSizes.none, supportsPutFull: TransferSizes = TransferSizes.none, supportsPutPartial: TransferSizes = TransferSizes.none, supportsHint: TransferSizes = TransferSizes.none, mayDenyGet: Boolean = false, mayDenyPut: Boolean = false, alwaysGrantsT: Boolean = false, fifoId: Option[Int] = None) = { new TLSlaveParameters( setName = None, address = address, resources = resources, regionType = regionType, executable = executable, nodePath = nodePath, supports = TLMasterToSlaveTransferSizes( acquireT = supportsAcquireT, acquireB = supportsAcquireB, arithmetic = supportsArithmetic, logical = supportsLogical, get = supportsGet, putFull = supportsPutFull, putPartial = supportsPutPartial, hint = supportsHint), emits = TLSlaveToMasterTransferSizes.unknownEmits, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut, alwaysGrantsT = alwaysGrantsT, fifoId = fifoId) } def v2( address: Seq[AddressSet], nodePath: Seq[BaseNode] = Seq(), resources: Seq[Resource] = Seq(), name: Option[String] = None, regionType: RegionType.T = RegionType.GET_EFFECTS, executable: Boolean = false, fifoId: Option[Int] = None, supports: TLMasterToSlaveTransferSizes = TLMasterToSlaveTransferSizes.unknownSupports, emits: TLSlaveToMasterTransferSizes = TLSlaveToMasterTransferSizes.unknownEmits, alwaysGrantsT: Boolean = false, mayDenyGet: Boolean = false, mayDenyPut: Boolean = false) = { new TLSlaveParameters( nodePath = nodePath, resources = resources, setName = name, address = address, regionType = regionType, executable = executable, fifoId = fifoId, supports = supports, emits = emits, alwaysGrantsT = alwaysGrantsT, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut) } } object TLManagerParameters { @deprecated("Use TLSlaveParameters.v1 instead of TLManagerParameters","") def apply( address: Seq[AddressSet], resources: Seq[Resource] = Seq(), regionType: RegionType.T = RegionType.GET_EFFECTS, executable: Boolean = false, nodePath: Seq[BaseNode] = Seq(), supportsAcquireT: TransferSizes = TransferSizes.none, supportsAcquireB: TransferSizes = TransferSizes.none, supportsArithmetic: TransferSizes = TransferSizes.none, supportsLogical: TransferSizes = TransferSizes.none, supportsGet: TransferSizes = TransferSizes.none, supportsPutFull: TransferSizes = TransferSizes.none, supportsPutPartial: TransferSizes = TransferSizes.none, supportsHint: TransferSizes = TransferSizes.none, mayDenyGet: Boolean = false, mayDenyPut: Boolean = false, alwaysGrantsT: Boolean = false, fifoId: Option[Int] = None) = TLSlaveParameters.v1( address, resources, regionType, executable, nodePath, supportsAcquireT, supportsAcquireB, supportsArithmetic, supportsLogical, supportsGet, supportsPutFull, supportsPutPartial, supportsHint, mayDenyGet, mayDenyPut, alwaysGrantsT, fifoId, ) } case class TLChannelBeatBytes(a: Option[Int], b: Option[Int], c: Option[Int], d: Option[Int]) { def members = Seq(a, b, c, d) members.collect { case Some(beatBytes) => require (isPow2(beatBytes), "Data channel width must be a power of 2") } } object TLChannelBeatBytes{ def apply(beatBytes: Int): TLChannelBeatBytes = TLChannelBeatBytes( Some(beatBytes), Some(beatBytes), Some(beatBytes), Some(beatBytes)) def apply(): TLChannelBeatBytes = TLChannelBeatBytes( None, None, None, None) } class TLSlavePortParameters private( val slaves: Seq[TLSlaveParameters], 
val channelBytes: TLChannelBeatBytes, val endSinkId: Int, val minLatency: Int, val responseFields: Seq[BundleFieldBase], val requestKeys: Seq[BundleKeyBase]) extends SimpleProduct { def sortedSlaves = slaves.sortBy(_.sortedAddress.head) override def canEqual(that: Any): Boolean = that.isInstanceOf[TLSlavePortParameters] override def productPrefix = "TLSlavePortParameters" def productArity: Int = 6 def productElement(n: Int): Any = n match { case 0 => slaves case 1 => channelBytes case 2 => endSinkId case 3 => minLatency case 4 => responseFields case 5 => requestKeys case _ => throw new IndexOutOfBoundsException(n.toString) } require (!slaves.isEmpty, "Slave ports must have slaves") require (endSinkId >= 0, "Sink ids cannot be negative") require (minLatency >= 0, "Minimum required latency cannot be negative") // Using this API implies you cannot handle mixed-width busses def beatBytes = { channelBytes.members.foreach { width => require (width.isDefined && width == channelBytes.a) } channelBytes.a.get } // TODO this should be deprecated def managers = slaves def requireFifo(policy: TLFIFOFixer.Policy = TLFIFOFixer.allFIFO) = { val relevant = slaves.filter(m => policy(m)) relevant.foreach { m => require(m.fifoId == relevant.head.fifoId, s"${m.name} had fifoId ${m.fifoId}, which was not homogeneous (${slaves.map(s => (s.name, s.fifoId))}) ") } } // Bounds on required sizes def maxAddress = slaves.map(_.maxAddress).max def maxTransfer = slaves.map(_.maxTransfer).max def mayDenyGet = slaves.exists(_.mayDenyGet) def mayDenyPut = slaves.exists(_.mayDenyPut) // Diplomatically determined operation sizes emitted by all outward Slaves // as opposed to emits* which generate circuitry to check which specific addresses val allEmitClaims = slaves.map(_.emits).reduce( _ intersect _) // Operation Emitted by at least one outward Slaves // as opposed to emits* which generate circuitry to check which specific addresses val anyEmitClaims = slaves.map(_.emits).reduce(_ mincover _) // Diplomatically determined operation sizes supported by all outward Slaves // as opposed to supports* which generate circuitry to check which specific addresses val allSupportClaims = slaves.map(_.supports).reduce( _ intersect _) val allSupportAcquireT = allSupportClaims.acquireT val allSupportAcquireB = allSupportClaims.acquireB val allSupportArithmetic = allSupportClaims.arithmetic val allSupportLogical = allSupportClaims.logical val allSupportGet = allSupportClaims.get val allSupportPutFull = allSupportClaims.putFull val allSupportPutPartial = allSupportClaims.putPartial val allSupportHint = allSupportClaims.hint // Operation supported by at least one outward Slaves // as opposed to supports* which generate circuitry to check which specific addresses val anySupportClaims = slaves.map(_.supports).reduce(_ mincover _) val anySupportAcquireT = !anySupportClaims.acquireT.none val anySupportAcquireB = !anySupportClaims.acquireB.none val anySupportArithmetic = !anySupportClaims.arithmetic.none val anySupportLogical = !anySupportClaims.logical.none val anySupportGet = !anySupportClaims.get.none val anySupportPutFull = !anySupportClaims.putFull.none val anySupportPutPartial = !anySupportClaims.putPartial.none val anySupportHint = !anySupportClaims.hint.none // Supporting Acquire means being routable for GrantAck require ((endSinkId == 0) == !anySupportAcquireB) // These return Option[TLSlaveParameters] for your convenience def find(address: BigInt) = slaves.find(_.address.exists(_.contains(address))) // The safe version will check the 
entire address def findSafe(address: UInt) = VecInit(sortedSlaves.map(_.address.map(_.contains(address)).reduce(_ || _))) // The fast version assumes the address is valid (you probably want fastProperty instead of this function) def findFast(address: UInt) = { val routingMask = AddressDecoder(slaves.map(_.address)) VecInit(sortedSlaves.map(_.address.map(_.widen(~routingMask)).distinct.map(_.contains(address)).reduce(_ || _))) } // Compute the simplest AddressSets that decide a key def fastPropertyGroup[K](p: TLSlaveParameters => K): Seq[(K, Seq[AddressSet])] = { val groups = groupByIntoSeq(sortedSlaves.map(m => (p(m), m.address)))( _._1).map { case (k, vs) => k -> vs.flatMap(_._2) } val reductionMask = AddressDecoder(groups.map(_._2)) groups.map { case (k, seq) => k -> AddressSet.unify(seq.map(_.widen(~reductionMask)).distinct) } } // Select a property def fastProperty[K, D <: Data](address: UInt, p: TLSlaveParameters => K, d: K => D): D = Mux1H(fastPropertyGroup(p).map { case (v, a) => (a.map(_.contains(address)).reduce(_||_), d(v)) }) // Note: returns the actual fifoId + 1 or 0 if None def findFifoIdFast(address: UInt) = fastProperty(address, _.fifoId.map(_+1).getOrElse(0), (i:Int) => i.U) def hasFifoIdFast(address: UInt) = fastProperty(address, _.fifoId.isDefined, (b:Boolean) => b.B) // Does this Port manage this ID/address? def containsSafe(address: UInt) = findSafe(address).reduce(_ || _) private def addressHelper( // setting safe to false indicates that all addresses are expected to be legal, which might reduce circuit complexity safe: Boolean, // member filters out the sizes being checked based on the opcode being emitted or supported member: TLSlaveParameters => TransferSizes, address: UInt, lgSize: UInt, // range provides a limit on the sizes that are expected to be evaluated, which might reduce circuit complexity range: Option[TransferSizes]): Bool = { // trim reduces circuit complexity by intersecting checked sizes with the range argument def trim(x: TransferSizes) = range.map(_.intersect(x)).getOrElse(x) // groupBy returns an unordered map, convert back to Seq and sort the result for determinism // groupByIntoSeq is turning slaves into trimmed membership sizes // We are grouping all the slaves by their transfer size where // if they support the trimmed size then // member is the type of transfer that you are looking for (What you are trying to filter on) // When you consider membership, you are trimming the sizes to only the ones that you care about // you are filtering the slaves based on both whether they support a particular opcode and the size // Grouping the slaves based on the actual transfer size range they support // intersecting the range and checking their membership // FOR SUPPORTCASES instead of returning the list of slaves, // you are returning a map from transfer size to the set of // address sets that are supported for that transfer size // find all the slaves that support a certain type of operation and then group their addresses by the supported size // for every size there could be multiple address ranges // safety is a trade off between checking between all possible addresses vs only the addresses // that are known to have supported sizes // the trade off is 'checking all addresses is a more expensive circuit but will always give you // the right answer even if you give it an illegal address' // the not safe version is a cheaper circuit but if you give it an illegal address then it might produce the wrong answer // fast presumes address legality // This 
groupByIntoSeq deterministically groups all address sets for which a given `member` transfer size applies. // In the resulting Map of cases, the keys are transfer sizes and the values are all address sets which emit or support that size. val supportCases = groupByIntoSeq(slaves)(m => trim(member(m))).map { case (k: TransferSizes, vs: Seq[TLSlaveParameters]) => k -> vs.flatMap(_.address) } // safe produces a circuit that compares against all possible addresses, // whereas fast presumes that the address is legal but uses an efficient address decoder val mask = if (safe) ~BigInt(0) else AddressDecoder(supportCases.map(_._2)) // Simplified creates the most concise possible representation of each cases' address sets based on the mask. val simplified = supportCases.map { case (k, seq) => k -> AddressSet.unify(seq.map(_.widen(~mask)).distinct) } simplified.map { case (s, a) => // s is a size, you are checking for this size either the size of the operation is in s // We return an or-reduction of all the cases, checking whether any contains both the dynamic size and dynamic address on the wire. ((Some(s) == range).B || s.containsLg(lgSize)) && a.map(_.contains(address)).reduce(_||_) }.foldLeft(false.B)(_||_) } def supportsAcquireTSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.acquireT, address, lgSize, range) def supportsAcquireBSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.acquireB, address, lgSize, range) def supportsArithmeticSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.arithmetic, address, lgSize, range) def supportsLogicalSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.logical, address, lgSize, range) def supportsGetSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.get, address, lgSize, range) def supportsPutFullSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.putFull, address, lgSize, range) def supportsPutPartialSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.putPartial, address, lgSize, range) def supportsHintSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.hint, address, lgSize, range) def supportsAcquireTFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.acquireT, address, lgSize, range) def supportsAcquireBFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.acquireB, address, lgSize, range) def supportsArithmeticFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.arithmetic, address, lgSize, range) def supportsLogicalFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.logical, address, lgSize, range) def supportsGetFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.get, address, lgSize, range) def supportsPutFullFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.putFull, address, lgSize, range) def supportsPutPartialFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, 
_.supports.putPartial, address, lgSize, range) def supportsHintFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.hint, address, lgSize, range) def emitsProbeSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.probe, address, lgSize, range) def emitsArithmeticSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.arithmetic, address, lgSize, range) def emitsLogicalSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.logical, address, lgSize, range) def emitsGetSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.get, address, lgSize, range) def emitsPutFullSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.putFull, address, lgSize, range) def emitsPutPartialSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.putPartial, address, lgSize, range) def emitsHintSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.hint, address, lgSize, range) def findTreeViolation() = slaves.flatMap(_.findTreeViolation()).headOption def isTree = !slaves.exists(!_.isTree) def infoString = "Slave Port Beatbytes = " + beatBytes + "\n" + "Slave Port MinLatency = " + minLatency + "\n\n" + slaves.map(_.infoString).mkString def v1copy( managers: Seq[TLSlaveParameters] = slaves, beatBytes: Int = -1, endSinkId: Int = endSinkId, minLatency: Int = minLatency, responseFields: Seq[BundleFieldBase] = responseFields, requestKeys: Seq[BundleKeyBase] = requestKeys) = { new TLSlavePortParameters( slaves = managers, channelBytes = if (beatBytes != -1) TLChannelBeatBytes(beatBytes) else channelBytes, endSinkId = endSinkId, minLatency = minLatency, responseFields = responseFields, requestKeys = requestKeys) } def v2copy( slaves: Seq[TLSlaveParameters] = slaves, channelBytes: TLChannelBeatBytes = channelBytes, endSinkId: Int = endSinkId, minLatency: Int = minLatency, responseFields: Seq[BundleFieldBase] = responseFields, requestKeys: Seq[BundleKeyBase] = requestKeys) = { new TLSlavePortParameters( slaves = slaves, channelBytes = channelBytes, endSinkId = endSinkId, minLatency = minLatency, responseFields = responseFields, requestKeys = requestKeys) } @deprecated("Use v1copy instead of copy","") def copy( managers: Seq[TLSlaveParameters] = slaves, beatBytes: Int = -1, endSinkId: Int = endSinkId, minLatency: Int = minLatency, responseFields: Seq[BundleFieldBase] = responseFields, requestKeys: Seq[BundleKeyBase] = requestKeys) = { v1copy( managers, beatBytes, endSinkId, minLatency, responseFields, requestKeys) } } object TLSlavePortParameters { def v1( managers: Seq[TLSlaveParameters], beatBytes: Int, endSinkId: Int = 0, minLatency: Int = 0, responseFields: Seq[BundleFieldBase] = Nil, requestKeys: Seq[BundleKeyBase] = Nil) = { new TLSlavePortParameters( slaves = managers, channelBytes = TLChannelBeatBytes(beatBytes), endSinkId = endSinkId, minLatency = minLatency, responseFields = responseFields, requestKeys = requestKeys) } } object TLManagerPortParameters { @deprecated("Use TLSlavePortParameters.v1 instead of TLManagerPortParameters","") def apply( managers: Seq[TLSlaveParameters], beatBytes: Int, endSinkId: Int = 0, minLatency: Int = 0, responseFields: Seq[BundleFieldBase] = Nil, requestKeys: Seq[BundleKeyBase] = Nil) = { 
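// Editorial note (hypothetical sketch, not in the original source): this deprecated
// wrapper simply forwards to TLSlavePortParameters.v1. A port for the slave sketched
// earlier, seen over an 8-byte-wide bus, could be built directly as:
//   TLSlavePortParameters.v1(
//     managers   = Seq(mySlaveParameters), // mySlaveParameters is assumed, e.g. built above
//     beatBytes  = 8,
//     minLatency = 1)
// endSinkId is left at 0 because the slave does not support AcquireB, satisfying the
// `(endSinkId == 0) == !anySupportAcquireB` requirement checked in TLSlavePortParameters.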
TLSlavePortParameters.v1( managers, beatBytes, endSinkId, minLatency, responseFields, requestKeys) } } class TLMasterParameters private( val nodePath: Seq[BaseNode], val resources: Seq[Resource], val name: String, val visibility: Seq[AddressSet], val unusedRegionTypes: Set[RegionType.T], val executesOnly: Boolean, val requestFifo: Boolean, // only a request, not a requirement. applies to A, not C. val supports: TLSlaveToMasterTransferSizes, val emits: TLMasterToSlaveTransferSizes, val neverReleasesData: Boolean, val sourceId: IdRange) extends SimpleProduct { override def canEqual(that: Any): Boolean = that.isInstanceOf[TLMasterParameters] override def productPrefix = "TLMasterParameters" // We intentionally omit nodePath for equality testing / formatting def productArity: Int = 10 def productElement(n: Int): Any = n match { case 0 => name case 1 => sourceId case 2 => resources case 3 => visibility case 4 => unusedRegionTypes case 5 => executesOnly case 6 => requestFifo case 7 => supports case 8 => emits case 9 => neverReleasesData case _ => throw new IndexOutOfBoundsException(n.toString) } require (!sourceId.isEmpty) require (!visibility.isEmpty) require (supports.putFull.contains(supports.putPartial)) // We only support these operations if we support Probe (ie: we're a cache) require (supports.probe.contains(supports.arithmetic)) require (supports.probe.contains(supports.logical)) require (supports.probe.contains(supports.get)) require (supports.probe.contains(supports.putFull)) require (supports.probe.contains(supports.putPartial)) require (supports.probe.contains(supports.hint)) visibility.combinations(2).foreach { case Seq(x,y) => require (!x.overlaps(y), s"$x and $y overlap.") } val maxTransfer = List( supports.probe.max, supports.arithmetic.max, supports.logical.max, supports.get.max, supports.putFull.max, supports.putPartial.max).max def infoString = { s"""Master Name = ${name} |visibility = ${visibility} |emits = ${emits.infoString} |sourceId = ${sourceId} | |""".stripMargin } def v1copy( name: String = name, sourceId: IdRange = sourceId, nodePath: Seq[BaseNode] = nodePath, requestFifo: Boolean = requestFifo, visibility: Seq[AddressSet] = visibility, supportsProbe: TransferSizes = supports.probe, supportsArithmetic: TransferSizes = supports.arithmetic, supportsLogical: TransferSizes = supports.logical, supportsGet: TransferSizes = supports.get, supportsPutFull: TransferSizes = supports.putFull, supportsPutPartial: TransferSizes = supports.putPartial, supportsHint: TransferSizes = supports.hint) = { new TLMasterParameters( nodePath = nodePath, resources = this.resources, name = name, visibility = visibility, unusedRegionTypes = this.unusedRegionTypes, executesOnly = this.executesOnly, requestFifo = requestFifo, supports = TLSlaveToMasterTransferSizes( probe = supportsProbe, arithmetic = supportsArithmetic, logical = supportsLogical, get = supportsGet, putFull = supportsPutFull, putPartial = supportsPutPartial, hint = supportsHint), emits = this.emits, neverReleasesData = this.neverReleasesData, sourceId = sourceId) } def v2copy( nodePath: Seq[BaseNode] = nodePath, resources: Seq[Resource] = resources, name: String = name, visibility: Seq[AddressSet] = visibility, unusedRegionTypes: Set[RegionType.T] = unusedRegionTypes, executesOnly: Boolean = executesOnly, requestFifo: Boolean = requestFifo, supports: TLSlaveToMasterTransferSizes = supports, emits: TLMasterToSlaveTransferSizes = emits, neverReleasesData: Boolean = neverReleasesData, sourceId: IdRange = sourceId) = { new 
TLMasterParameters( nodePath = nodePath, resources = resources, name = name, visibility = visibility, unusedRegionTypes = unusedRegionTypes, executesOnly = executesOnly, requestFifo = requestFifo, supports = supports, emits = emits, neverReleasesData = neverReleasesData, sourceId = sourceId) } @deprecated("Use v1copy instead of copy","") def copy( name: String = name, sourceId: IdRange = sourceId, nodePath: Seq[BaseNode] = nodePath, requestFifo: Boolean = requestFifo, visibility: Seq[AddressSet] = visibility, supportsProbe: TransferSizes = supports.probe, supportsArithmetic: TransferSizes = supports.arithmetic, supportsLogical: TransferSizes = supports.logical, supportsGet: TransferSizes = supports.get, supportsPutFull: TransferSizes = supports.putFull, supportsPutPartial: TransferSizes = supports.putPartial, supportsHint: TransferSizes = supports.hint) = { v1copy( name = name, sourceId = sourceId, nodePath = nodePath, requestFifo = requestFifo, visibility = visibility, supportsProbe = supportsProbe, supportsArithmetic = supportsArithmetic, supportsLogical = supportsLogical, supportsGet = supportsGet, supportsPutFull = supportsPutFull, supportsPutPartial = supportsPutPartial, supportsHint = supportsHint) } } object TLMasterParameters { def v1( name: String, sourceId: IdRange = IdRange(0,1), nodePath: Seq[BaseNode] = Seq(), requestFifo: Boolean = false, visibility: Seq[AddressSet] = Seq(AddressSet(0, ~0)), supportsProbe: TransferSizes = TransferSizes.none, supportsArithmetic: TransferSizes = TransferSizes.none, supportsLogical: TransferSizes = TransferSizes.none, supportsGet: TransferSizes = TransferSizes.none, supportsPutFull: TransferSizes = TransferSizes.none, supportsPutPartial: TransferSizes = TransferSizes.none, supportsHint: TransferSizes = TransferSizes.none) = { new TLMasterParameters( nodePath = nodePath, resources = Nil, name = name, visibility = visibility, unusedRegionTypes = Set(), executesOnly = false, requestFifo = requestFifo, supports = TLSlaveToMasterTransferSizes( probe = supportsProbe, arithmetic = supportsArithmetic, logical = supportsLogical, get = supportsGet, putFull = supportsPutFull, putPartial = supportsPutPartial, hint = supportsHint), emits = TLMasterToSlaveTransferSizes.unknownEmits, neverReleasesData = false, sourceId = sourceId) } def v2( nodePath: Seq[BaseNode] = Seq(), resources: Seq[Resource] = Nil, name: String, visibility: Seq[AddressSet] = Seq(AddressSet(0, ~0)), unusedRegionTypes: Set[RegionType.T] = Set(), executesOnly: Boolean = false, requestFifo: Boolean = false, supports: TLSlaveToMasterTransferSizes = TLSlaveToMasterTransferSizes.unknownSupports, emits: TLMasterToSlaveTransferSizes = TLMasterToSlaveTransferSizes.unknownEmits, neverReleasesData: Boolean = false, sourceId: IdRange = IdRange(0,1)) = { new TLMasterParameters( nodePath = nodePath, resources = resources, name = name, visibility = visibility, unusedRegionTypes = unusedRegionTypes, executesOnly = executesOnly, requestFifo = requestFifo, supports = supports, emits = emits, neverReleasesData = neverReleasesData, sourceId = sourceId) } } object TLClientParameters { @deprecated("Use TLMasterParameters.v1 instead of TLClientParameters","") def apply( name: String, sourceId: IdRange = IdRange(0,1), nodePath: Seq[BaseNode] = Seq(), requestFifo: Boolean = false, visibility: Seq[AddressSet] = Seq(AddressSet.everything), supportsProbe: TransferSizes = TransferSizes.none, supportsArithmetic: TransferSizes = TransferSizes.none, supportsLogical: TransferSizes = TransferSizes.none, supportsGet: 
TransferSizes = TransferSizes.none, supportsPutFull: TransferSizes = TransferSizes.none, supportsPutPartial: TransferSizes = TransferSizes.none, supportsHint: TransferSizes = TransferSizes.none) = { TLMasterParameters.v1( name = name, sourceId = sourceId, nodePath = nodePath, requestFifo = requestFifo, visibility = visibility, supportsProbe = supportsProbe, supportsArithmetic = supportsArithmetic, supportsLogical = supportsLogical, supportsGet = supportsGet, supportsPutFull = supportsPutFull, supportsPutPartial = supportsPutPartial, supportsHint = supportsHint) } } class TLMasterPortParameters private( val masters: Seq[TLMasterParameters], val channelBytes: TLChannelBeatBytes, val minLatency: Int, val echoFields: Seq[BundleFieldBase], val requestFields: Seq[BundleFieldBase], val responseKeys: Seq[BundleKeyBase]) extends SimpleProduct { override def canEqual(that: Any): Boolean = that.isInstanceOf[TLMasterPortParameters] override def productPrefix = "TLMasterPortParameters" def productArity: Int = 6 def productElement(n: Int): Any = n match { case 0 => masters case 1 => channelBytes case 2 => minLatency case 3 => echoFields case 4 => requestFields case 5 => responseKeys case _ => throw new IndexOutOfBoundsException(n.toString) } require (!masters.isEmpty) require (minLatency >= 0) def clients = masters // Require disjoint ranges for Ids IdRange.overlaps(masters.map(_.sourceId)).foreach { case (x, y) => require (!x.overlaps(y), s"TLClientParameters.sourceId ${x} overlaps ${y}") } // Bounds on required sizes def endSourceId = masters.map(_.sourceId.end).max def maxTransfer = masters.map(_.maxTransfer).max // The unused sources < endSourceId def unusedSources: Seq[Int] = { val usedSources = masters.map(_.sourceId).sortBy(_.start) ((Seq(0) ++ usedSources.map(_.end)) zip usedSources.map(_.start)) flatMap { case (end, start) => end until start } } // Diplomatically determined operation sizes emitted by all inward Masters // as opposed to emits* which generate circuitry to check which specific addresses val allEmitClaims = masters.map(_.emits).reduce( _ intersect _) // Diplomatically determined operation sizes Emitted by at least one inward Masters // as opposed to emits* which generate circuitry to check which specific addresses val anyEmitClaims = masters.map(_.emits).reduce(_ mincover _) // Diplomatically determined operation sizes supported by all inward Masters // as opposed to supports* which generate circuitry to check which specific addresses val allSupportProbe = masters.map(_.supports.probe) .reduce(_ intersect _) val allSupportArithmetic = masters.map(_.supports.arithmetic).reduce(_ intersect _) val allSupportLogical = masters.map(_.supports.logical) .reduce(_ intersect _) val allSupportGet = masters.map(_.supports.get) .reduce(_ intersect _) val allSupportPutFull = masters.map(_.supports.putFull) .reduce(_ intersect _) val allSupportPutPartial = masters.map(_.supports.putPartial).reduce(_ intersect _) val allSupportHint = masters.map(_.supports.hint) .reduce(_ intersect _) // Diplomatically determined operation sizes supported by at least one master // as opposed to supports* which generate circuitry to check which specific addresses val anySupportProbe = masters.map(!_.supports.probe.none) .reduce(_ || _) val anySupportArithmetic = masters.map(!_.supports.arithmetic.none).reduce(_ || _) val anySupportLogical = masters.map(!_.supports.logical.none) .reduce(_ || _) val anySupportGet = masters.map(!_.supports.get.none) .reduce(_ || _) val anySupportPutFull = 
masters.map(!_.supports.putFull.none) .reduce(_ || _) val anySupportPutPartial = masters.map(!_.supports.putPartial.none).reduce(_ || _) val anySupportHint = masters.map(!_.supports.hint.none) .reduce(_ || _) // These return Option[TLMasterParameters] for your convenience def find(id: Int) = masters.find(_.sourceId.contains(id)) // Synthesizable lookup methods def find(id: UInt) = VecInit(masters.map(_.sourceId.contains(id))) def contains(id: UInt) = find(id).reduce(_ || _) def requestFifo(id: UInt) = Mux1H(find(id), masters.map(c => c.requestFifo.B)) // Available during RTL runtime, checks to see if (id, size) is supported by the master's (client's) diplomatic parameters private def sourceIdHelper(member: TLMasterParameters => TransferSizes)(id: UInt, lgSize: UInt) = { val allSame = masters.map(member(_) == member(masters(0))).reduce(_ && _) // this if statement is a coarse generalization of the groupBy in the sourceIdHelper2 version; // the case where there is only one group. if (allSame) member(masters(0)).containsLg(lgSize) else { // Find the master associated with ID and returns whether that particular master is able to receive transaction of lgSize Mux1H(find(id), masters.map(member(_).containsLg(lgSize))) } } // Check for support of a given operation at a specific id val supportsProbe = sourceIdHelper(_.supports.probe) _ val supportsArithmetic = sourceIdHelper(_.supports.arithmetic) _ val supportsLogical = sourceIdHelper(_.supports.logical) _ val supportsGet = sourceIdHelper(_.supports.get) _ val supportsPutFull = sourceIdHelper(_.supports.putFull) _ val supportsPutPartial = sourceIdHelper(_.supports.putPartial) _ val supportsHint = sourceIdHelper(_.supports.hint) _ // TODO: Merge sourceIdHelper2 with sourceIdHelper private def sourceIdHelper2( member: TLMasterParameters => TransferSizes, sourceId: UInt, lgSize: UInt): Bool = { // Because sourceIds are uniquely owned by each master, we use them to group the // cases that have to be checked. 
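// A hedged illustration (hypothetical masters, not taken from this design): suppose a master
// "core" owns sourceId [0,4) and emits Gets of 1-8 bytes, while a master "dma" owns sourceId
// [4,8) and emits Gets of 1-64 bytes. For member = _.emits.get, the grouping below yields two cases,
//   TransferSizes(1, 8)  -> Seq(IdRange(0, 4))
//   TransferSizes(1, 64) -> Seq(IdRange(4, 8))
// and the or-reduction synthesizes to roughly
//   (TransferSizes(1, 8).containsLg(lgSize)  && IdRange(0, 4).contains(sourceId)) ||
//   (TransferSizes(1, 64).containsLg(lgSize) && IdRange(4, 8).contains(sourceId))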
val emitCases = groupByIntoSeq(masters)(m => member(m)).map { case (k, vs) => k -> vs.map(_.sourceId) } emitCases.map { case (s, a) => (s.containsLg(lgSize)) && a.map(_.contains(sourceId)).reduce(_||_) }.foldLeft(false.B)(_||_) } // Check for emit of a given operation at a specific id def emitsAcquireT (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.acquireT, sourceId, lgSize) def emitsAcquireB (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.acquireB, sourceId, lgSize) def emitsArithmetic(sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.arithmetic, sourceId, lgSize) def emitsLogical (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.logical, sourceId, lgSize) def emitsGet (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.get, sourceId, lgSize) def emitsPutFull (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.putFull, sourceId, lgSize) def emitsPutPartial(sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.putPartial, sourceId, lgSize) def emitsHint (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.hint, sourceId, lgSize) def infoString = masters.map(_.infoString).mkString def v1copy( clients: Seq[TLMasterParameters] = masters, minLatency: Int = minLatency, echoFields: Seq[BundleFieldBase] = echoFields, requestFields: Seq[BundleFieldBase] = requestFields, responseKeys: Seq[BundleKeyBase] = responseKeys) = { new TLMasterPortParameters( masters = clients, channelBytes = channelBytes, minLatency = minLatency, echoFields = echoFields, requestFields = requestFields, responseKeys = responseKeys) } def v2copy( masters: Seq[TLMasterParameters] = masters, channelBytes: TLChannelBeatBytes = channelBytes, minLatency: Int = minLatency, echoFields: Seq[BundleFieldBase] = echoFields, requestFields: Seq[BundleFieldBase] = requestFields, responseKeys: Seq[BundleKeyBase] = responseKeys) = { new TLMasterPortParameters( masters = masters, channelBytes = channelBytes, minLatency = minLatency, echoFields = echoFields, requestFields = requestFields, responseKeys = responseKeys) } @deprecated("Use v1copy instead of copy","") def copy( clients: Seq[TLMasterParameters] = masters, minLatency: Int = minLatency, echoFields: Seq[BundleFieldBase] = echoFields, requestFields: Seq[BundleFieldBase] = requestFields, responseKeys: Seq[BundleKeyBase] = responseKeys) = { v1copy( clients, minLatency, echoFields, requestFields, responseKeys) } } object TLClientPortParameters { @deprecated("Use TLMasterPortParameters.v1 instead of TLClientPortParameters","") def apply( clients: Seq[TLMasterParameters], minLatency: Int = 0, echoFields: Seq[BundleFieldBase] = Nil, requestFields: Seq[BundleFieldBase] = Nil, responseKeys: Seq[BundleKeyBase] = Nil) = { TLMasterPortParameters.v1( clients, minLatency, echoFields, requestFields, responseKeys) } } object TLMasterPortParameters { def v1( clients: Seq[TLMasterParameters], minLatency: Int = 0, echoFields: Seq[BundleFieldBase] = Nil, requestFields: Seq[BundleFieldBase] = Nil, responseKeys: Seq[BundleKeyBase] = Nil) = { new TLMasterPortParameters( masters = clients, channelBytes = TLChannelBeatBytes(), minLatency = minLatency, echoFields = echoFields, requestFields = requestFields, responseKeys = responseKeys) } def v2( masters: Seq[TLMasterParameters], channelBytes: TLChannelBeatBytes = TLChannelBeatBytes(), minLatency: Int = 0, echoFields: Seq[BundleFieldBase] = Nil, requestFields: Seq[BundleFieldBase] = Nil, responseKeys: Seq[BundleKeyBase] = Nil) = { new TLMasterPortParameters( masters = masters, channelBytes = channelBytes, 
minLatency = minLatency, echoFields = echoFields, requestFields = requestFields, responseKeys = responseKeys) } } case class TLBundleParameters( addressBits: Int, dataBits: Int, sourceBits: Int, sinkBits: Int, sizeBits: Int, echoFields: Seq[BundleFieldBase], requestFields: Seq[BundleFieldBase], responseFields: Seq[BundleFieldBase], hasBCE: Boolean) { // Chisel has issues with 0-width wires require (addressBits >= 1) require (dataBits >= 8) require (sourceBits >= 1) require (sinkBits >= 1) require (sizeBits >= 1) require (isPow2(dataBits)) echoFields.foreach { f => require (f.key.isControl, s"${f} is not a legal echo field") } val addrLoBits = log2Up(dataBits/8) // Used to uniquify bus IP names def shortName = s"a${addressBits}d${dataBits}s${sourceBits}k${sinkBits}z${sizeBits}" + (if (hasBCE) "c" else "u") def union(x: TLBundleParameters) = TLBundleParameters( max(addressBits, x.addressBits), max(dataBits, x.dataBits), max(sourceBits, x.sourceBits), max(sinkBits, x.sinkBits), max(sizeBits, x.sizeBits), echoFields = BundleField.union(echoFields ++ x.echoFields), requestFields = BundleField.union(requestFields ++ x.requestFields), responseFields = BundleField.union(responseFields ++ x.responseFields), hasBCE || x.hasBCE) } object TLBundleParameters { val emptyBundleParams = TLBundleParameters( addressBits = 1, dataBits = 8, sourceBits = 1, sinkBits = 1, sizeBits = 1, echoFields = Nil, requestFields = Nil, responseFields = Nil, hasBCE = false) def union(x: Seq[TLBundleParameters]) = x.foldLeft(emptyBundleParams)((x,y) => x.union(y)) def apply(master: TLMasterPortParameters, slave: TLSlavePortParameters) = new TLBundleParameters( addressBits = log2Up(slave.maxAddress + 1), dataBits = slave.beatBytes * 8, sourceBits = log2Up(master.endSourceId), sinkBits = log2Up(slave.endSinkId), sizeBits = log2Up(log2Ceil(max(master.maxTransfer, slave.maxTransfer))+1), echoFields = master.echoFields, requestFields = BundleField.accept(master.requestFields, slave.requestKeys), responseFields = BundleField.accept(slave.responseFields, master.responseKeys), hasBCE = master.anySupportProbe && slave.anySupportAcquireB) } case class TLEdgeParameters( master: TLMasterPortParameters, slave: TLSlavePortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge { // legacy names: def manager = slave def client = master val maxTransfer = max(master.maxTransfer, slave.maxTransfer) val maxLgSize = log2Ceil(maxTransfer) // Sanity check the link... 
require (maxTransfer >= slave.beatBytes, s"Link's max transfer (${maxTransfer}) < ${slave.slaves.map(_.name)}'s beatBytes (${slave.beatBytes})") def diplomaticClaimsMasterToSlave = master.anyEmitClaims.intersect(slave.anySupportClaims) val bundle = TLBundleParameters(master, slave) def formatEdge = master.infoString + "\n" + slave.infoString } case class TLCreditedDelay( a: CreditedDelay, b: CreditedDelay, c: CreditedDelay, d: CreditedDelay, e: CreditedDelay) { def + (that: TLCreditedDelay): TLCreditedDelay = TLCreditedDelay( a = a + that.a, b = b + that.b, c = c + that.c, d = d + that.d, e = e + that.e) override def toString = s"(${a}, ${b}, ${c}, ${d}, ${e})" } object TLCreditedDelay { def apply(delay: CreditedDelay): TLCreditedDelay = apply(delay, delay.flip, delay, delay.flip, delay) } case class TLCreditedManagerPortParameters(delay: TLCreditedDelay, base: TLSlavePortParameters) {def infoString = base.infoString} case class TLCreditedClientPortParameters(delay: TLCreditedDelay, base: TLMasterPortParameters) {def infoString = base.infoString} case class TLCreditedEdgeParameters(client: TLCreditedClientPortParameters, manager: TLCreditedManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge { val delay = client.delay + manager.delay val bundle = TLBundleParameters(client.base, manager.base) def formatEdge = client.infoString + "\n" + manager.infoString } case class TLAsyncManagerPortParameters(async: AsyncQueueParams, base: TLSlavePortParameters) {def infoString = base.infoString} case class TLAsyncClientPortParameters(base: TLMasterPortParameters) {def infoString = base.infoString} case class TLAsyncBundleParameters(async: AsyncQueueParams, base: TLBundleParameters) case class TLAsyncEdgeParameters(client: TLAsyncClientPortParameters, manager: TLAsyncManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge { val bundle = TLAsyncBundleParameters(manager.async, TLBundleParameters(client.base, manager.base)) def formatEdge = client.infoString + "\n" + manager.infoString } case class TLRationalManagerPortParameters(direction: RationalDirection, base: TLSlavePortParameters) {def infoString = base.infoString} case class TLRationalClientPortParameters(base: TLMasterPortParameters) {def infoString = base.infoString} case class TLRationalEdgeParameters(client: TLRationalClientPortParameters, manager: TLRationalManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge { val bundle = TLBundleParameters(client.base, manager.base) def formatEdge = client.infoString + "\n" + manager.infoString } // To be unified, devices must agree on all of these terms case class ManagerUnificationKey( resources: Seq[Resource], regionType: RegionType.T, executable: Boolean, supportsAcquireT: TransferSizes, supportsAcquireB: TransferSizes, supportsArithmetic: TransferSizes, supportsLogical: TransferSizes, supportsGet: TransferSizes, supportsPutFull: TransferSizes, supportsPutPartial: TransferSizes, supportsHint: TransferSizes) object ManagerUnificationKey { def apply(x: TLSlaveParameters): ManagerUnificationKey = ManagerUnificationKey( resources = x.resources, regionType = x.regionType, executable = x.executable, supportsAcquireT = x.supportsAcquireT, supportsAcquireB = x.supportsAcquireB, supportsArithmetic = x.supportsArithmetic, supportsLogical = x.supportsLogical, supportsGet = x.supportsGet, supportsPutFull = x.supportsPutFull, supportsPutPartial = x.supportsPutPartial, supportsHint = x.supportsHint) } object 
ManagerUnification { def apply(slaves: Seq[TLSlaveParameters]): List[TLSlaveParameters] = { slaves.groupBy(ManagerUnificationKey.apply).values.map { seq => val agree = seq.forall(_.fifoId == seq.head.fifoId) seq(0).v1copy( address = AddressSet.unify(seq.flatMap(_.address)), fifoId = if (agree) seq(0).fifoId else None) }.toList } } case class TLBufferParams( a: BufferParams = BufferParams.none, b: BufferParams = BufferParams.none, c: BufferParams = BufferParams.none, d: BufferParams = BufferParams.none, e: BufferParams = BufferParams.none ) extends DirectedBuffers[TLBufferParams] { def copyIn(x: BufferParams) = this.copy(b = x, d = x) def copyOut(x: BufferParams) = this.copy(a = x, c = x, e = x) def copyInOut(x: BufferParams) = this.copyIn(x).copyOut(x) } /** Pretty printing of TL source id maps */ class TLSourceIdMap(tl: TLMasterPortParameters) extends IdMap[TLSourceIdMapEntry] { private val tlDigits = String.valueOf(tl.endSourceId-1).length() protected val fmt = s"\t[%${tlDigits}d, %${tlDigits}d) %s%s%s" private val sorted = tl.masters.sortBy(_.sourceId) val mapping: Seq[TLSourceIdMapEntry] = sorted.map { case c => TLSourceIdMapEntry(c.sourceId, c.name, c.supports.probe, c.requestFifo) } } case class TLSourceIdMapEntry(tlId: IdRange, name: String, isCache: Boolean, requestFifo: Boolean) extends IdMapEntry { val from = tlId val to = tlId val maxTransactionsInFlight = Some(tlId.size) } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? 
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => 
a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? 
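// (A short summary of the decode below: Put, PutPartial and atomic operations always need T
// permissions; Get never does; a Hint needs T only for PREFETCH_WRITE; and an AcquireBlock or
// AcquirePerm needs T for NtoT or BtoT, but not for NtoB.)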
def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size 
:= lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = 
Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address 
:= toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def 
Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := 
mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
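As a rough, illustrative sketch (the numeric values below are assumed examples, not the configuration that actually produced the module that follows), the port widths of a generated TLMonitor follow directly from the TLBundleParameters(master, slave) formulas above: addressBits = log2Up(maxAddress + 1), dataBits = beatBytes * 8, sourceBits = log2Up(endSourceId), sinkBits = log2Up(endSinkId), and sizeBits = log2Up(log2Ceil(maxTransfer) + 1).

import chisel3.util.{log2Ceil, log2Up}

// Assumed example values only; log2Up and log2Ceil are the chisel3.util helpers used above.
object BundleWidthSketch extends App {
  val maxAddress: BigInt = BigInt("FFFFFFFF", 16)     // assumed: a 32-bit address space
  val beatBytes          = 1                          // assumed: 1-byte data beats
  val endSourceId        = 1                          // assumed: a single master source id
  val endSinkId          = 2                          // assumed
  val maxTransfer        = 4096                       // assumed largest transfer, in bytes

  val addressBits = log2Up(maxAddress + 1)            // 32
  val dataBits    = beatBytes * 8                     // 8
  val sourceBits  = log2Up(endSourceId)               // 1
  val sinkBits    = log2Up(endSinkId)                 // 1
  val sizeBits    = log2Up(log2Ceil(maxTransfer) + 1) // 4
  println(s"a${addressBits}d${dataBits}s${sourceBits}k${sinkBits}z${sizeBits}")
}

With these assumed values the sketch prints "a32d8s1k1z4", which lines up with the [31:0] address, [7:0] data and [3:0] size ports on the monitor below; the absence of B, C and E channels corresponds to hasBCE = false (master.anySupportProbe && slave.anySupportAcquireB).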
module TLMonitor_14( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [3:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_data, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14] input [3:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input io_in_d_bits_sink, // @[Monitor.scala:20:14] input io_in_d_bits_denied, // @[Monitor.scala:20:14] input [7:0] io_in_d_bits_data, // @[Monitor.scala:20:14] input io_in_d_bits_corrupt // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7] wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7] wire [3:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7] wire [31:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7] wire [7:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7] wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7] wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7] wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7] wire [3:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7] wire io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7] wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7] wire [7:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7] wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_a_bits_source = 1'h0; // @[Monitor.scala:36:7] wire io_in_a_bits_corrupt = 1'h0; // @[Monitor.scala:36:7] wire io_in_d_bits_source = 1'h0; // @[Monitor.scala:36:7] wire sink_ok = 1'h0; // @[Monitor.scala:309:31] wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35] wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36] wire _c_first_last_T 
= 1'h0; // @[Edges.scala:232:25] wire c_first_done = 1'h0; // @[Edges.scala:233:22] wire c_set = 1'h0; // @[Monitor.scala:738:34] wire c_set_wo_ready = 1'h0; // @[Monitor.scala:739:34] wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; 
// @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47] wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95] wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71] wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44] wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36] wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51] wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40] wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55] wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88] wire io_in_a_bits_mask = 1'h1; // @[Monitor.scala:36:7] wire _source_ok_T = 1'h1; // @[Parameters.scala:46:9] wire _source_ok_WIRE_0 = 1'h1; // 
@[Parameters.scala:1138:31] wire mask_sizeOH = 1'h1; // @[Misc.scala:202:81] wire _source_ok_T_1 = 1'h1; // @[Parameters.scala:46:9] wire _source_ok_WIRE_1_0 = 1'h1; // @[Parameters.scala:1138:31] wire _same_cycle_resp_T_2 = 1'h1; // @[Monitor.scala:684:113] wire c_first = 1'h1; // @[Edges.scala:231:25] wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire c_first_last = 1'h1; // @[Edges.scala:232:33] wire _same_cycle_resp_T_8 = 1'h1; // @[Monitor.scala:795:113] wire [3:0] _a_opcode_lookup_T = 4'h0; // @[Monitor.scala:637:69] wire [3:0] _a_size_lookup_T = 4'h0; // @[Monitor.scala:641:65] wire [3:0] _a_opcodes_set_T = 4'h0; // @[Monitor.scala:659:79] wire [3:0] _a_sizes_set_T = 4'h0; // @[Monitor.scala:660:77] wire [3:0] _d_opcodes_clr_T_4 = 4'h0; // @[Monitor.scala:680:101] wire [3:0] _d_sizes_clr_T_4 = 4'h0; // @[Monitor.scala:681:99] wire [3:0] _c_first_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_first_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_first_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_first_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] c_opcodes_set = 4'h0; // @[Monitor.scala:740:34] wire [3:0] _c_opcode_lookup_T = 4'h0; // @[Monitor.scala:749:69] wire [3:0] _c_size_lookup_T = 4'h0; // @[Monitor.scala:750:67] wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40] wire [3:0] _c_set_wo_ready_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_set_wo_ready_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_interm_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_opcodes_set_interm_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53] wire [3:0] _c_sizes_set_interm_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_sizes_set_interm_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_opcodes_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_T = 4'h0; // @[Monitor.scala:767:79] wire [3:0] _c_sizes_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_sizes_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_sizes_set_T = 4'h0; // @[Monitor.scala:768:77] wire [3:0] _c_probe_ack_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_probe_ack_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_probe_ack_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_probe_ack_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _d_opcodes_clr_T_10 = 4'h0; // @[Monitor.scala:790:101] wire [3:0] _d_sizes_clr_T_10 = 4'h0; // @[Monitor.scala:791:99] wire [3:0] _same_cycle_resp_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _same_cycle_resp_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _same_cycle_resp_WIRE_4_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_5_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [30:0] _d_sizes_clr_T_5 = 31'hFF; // @[Monitor.scala:681:74] wire [30:0] _d_sizes_clr_T_11 = 31'hFF; // 
@[Monitor.scala:791:74] wire [30:0] _d_opcodes_clr_T_5 = 31'hF; // @[Monitor.scala:680:76] wire [30:0] _d_opcodes_clr_T_11 = 31'hF; // @[Monitor.scala:790:76] wire [1:0] _a_set_wo_ready_T = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _a_set_T = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _d_clr_wo_ready_T = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _d_clr_T = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _c_set_wo_ready_T = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _c_set_T = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _d_clr_wo_ready_T_1 = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _d_clr_T_1 = 2'h1; // @[OneHot.scala:58:35] wire [11:0] _c_first_beats1_decode_T_2 = 12'h0; // @[package.scala:243:46] wire [11:0] c_first_beats1_decode = 12'h0; // @[Edges.scala:220:59] wire [11:0] c_first_beats1 = 12'h0; // @[Edges.scala:221:14] wire [11:0] _c_first_count_T = 12'h0; // @[Edges.scala:234:27] wire [11:0] c_first_count = 12'h0; // @[Edges.scala:234:25] wire [11:0] _c_first_counter_T = 12'h0; // @[Edges.scala:236:21] wire [11:0] _c_first_beats1_decode_T_1 = 12'hFFF; // @[package.scala:243:76] wire [11:0] c_first_counter1 = 12'hFFF; // @[Edges.scala:230:28] wire [12:0] _c_first_counter1_T = 13'h1FFF; // @[Edges.scala:230:28] wire [2:0] io_in_a_bits_param = 3'h0; // @[Monitor.scala:36:7] wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // 
@[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [7:0] _c_first_WIRE_bits_data = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _c_first_WIRE_1_bits_data = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _c_first_WIRE_2_bits_data = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _c_first_WIRE_3_bits_data = 8'h0; // @[Bundles.scala:265:61] wire [7:0] c_sizes_set = 8'h0; // @[Monitor.scala:741:34] wire [7:0] _c_set_wo_ready_WIRE_bits_data = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _c_set_wo_ready_WIRE_1_bits_data = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _c_set_WIRE_bits_data = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _c_set_WIRE_1_bits_data = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _c_opcodes_set_interm_WIRE_bits_data = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _c_opcodes_set_interm_WIRE_1_bits_data = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _c_sizes_set_interm_WIRE_bits_data = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _c_sizes_set_interm_WIRE_1_bits_data = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _c_opcodes_set_WIRE_bits_data = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _c_opcodes_set_WIRE_1_bits_data = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _c_sizes_set_WIRE_bits_data = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _c_sizes_set_WIRE_1_bits_data = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _c_probe_ack_WIRE_bits_data = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _c_probe_ack_WIRE_1_bits_data = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _c_probe_ack_WIRE_2_bits_data = 8'h0; // 
@[Bundles.scala:265:74] wire [7:0] _c_probe_ack_WIRE_3_bits_data = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _same_cycle_resp_WIRE_bits_data = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _same_cycle_resp_WIRE_1_bits_data = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _same_cycle_resp_WIRE_2_bits_data = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _same_cycle_resp_WIRE_3_bits_data = 8'h0; // @[Bundles.scala:265:61] wire [7:0] _same_cycle_resp_WIRE_4_bits_data = 8'h0; // @[Bundles.scala:265:74] wire [7:0] _same_cycle_resp_WIRE_5_bits_data = 8'h0; // @[Bundles.scala:265:61] wire [31:0] _c_first_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_first_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_first_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_first_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_set_wo_ready_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_set_wo_ready_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_opcodes_set_interm_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_opcodes_set_interm_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_sizes_set_interm_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_sizes_set_interm_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_opcodes_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_opcodes_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_sizes_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_sizes_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_probe_ack_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_probe_ack_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_probe_ack_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_probe_ack_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _same_cycle_resp_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _same_cycle_resp_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _same_cycle_resp_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _same_cycle_resp_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _same_cycle_resp_WIRE_4_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _same_cycle_resp_WIRE_5_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [15:0] _a_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:612:57] wire [15:0] _d_sizes_clr_T_3 = 16'hFF; // @[Monitor.scala:612:57] wire [15:0] _c_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:724:57] wire [15:0] _d_sizes_clr_T_9 = 16'hFF; // @[Monitor.scala:724:57] wire [16:0] _a_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:612:57] wire [16:0] _d_sizes_clr_T_2 = 17'hFF; // @[Monitor.scala:612:57] wire [16:0] _c_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:724:57] wire [16:0] _d_sizes_clr_T_8 = 17'hFF; // @[Monitor.scala:724:57] wire [15:0] _a_size_lookup_T_3 = 16'h100; // @[Monitor.scala:612:51] wire [15:0] _d_sizes_clr_T_1 = 16'h100; // @[Monitor.scala:612:51] wire [15:0] _c_size_lookup_T_3 = 16'h100; // @[Monitor.scala:724:51] wire [15:0] _d_sizes_clr_T_7 = 16'h100; // @[Monitor.scala:724:51] wire [15:0] _a_opcode_lookup_T_5 = 16'hF; 
// @[Monitor.scala:612:57] wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [19:0] _c_sizes_set_T_1 = 20'h0; // @[Monitor.scala:768:52] wire [18:0] _c_opcodes_set_T_1 = 19'h0; // @[Monitor.scala:767:54] wire [4:0] _c_sizes_set_interm_T_1 = 5'h1; // @[Monitor.scala:766:59] wire [4:0] c_sizes_set_interm = 5'h0; // @[Monitor.scala:755:40] wire [4:0] _c_sizes_set_interm_T = 5'h0; // @[Monitor.scala:766:51] wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61] wire [26:0] _c_first_beats1_decode_T = 27'hFFF; // @[package.scala:243:71] wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42] wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42] wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42] wire [3:0] _a_size_lookup_T_2 = 4'h8; // @[Monitor.scala:641:117] wire [3:0] _d_sizes_clr_T = 4'h8; // @[Monitor.scala:681:48] wire [3:0] _c_size_lookup_T_2 = 4'h8; // @[Monitor.scala:750:119] wire [3:0] _d_sizes_clr_T_6 = 4'h8; // @[Monitor.scala:791:48] wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123] wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48] wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123] wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48] wire [3:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34] wire [26:0] _GEN = 27'hFFF << io_in_a_bits_size_0; // @[package.scala:243:71] wire [26:0] _is_aligned_mask_T; // @[package.scala:243:71] assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71] wire [26:0] _a_first_beats1_decode_T; // @[package.scala:243:71] assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71] wire [26:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71] assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71] wire [11:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}] wire [31:0] _is_aligned_T = {20'h0, io_in_a_bits_address_0[11:0] & is_aligned_mask}; // @[package.scala:243:46] wire is_aligned = _is_aligned_T == 32'h0; // @[Edges.scala:21:{16,24}] wire _T_1081 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35] wire 
_a_first_T; // @[Decoupled.scala:51:35] assign _a_first_T = _T_1081; // @[Decoupled.scala:51:35] wire _a_first_T_1; // @[Decoupled.scala:51:35] assign _a_first_T_1 = _T_1081; // @[Decoupled.scala:51:35] wire [11:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [11:0] a_first_beats1_decode = _a_first_beats1_decode_T_2; // @[package.scala:243:46] wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}] wire [11:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 12'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [11:0] a_first_counter; // @[Edges.scala:229:27] wire [12:0] _a_first_counter1_T = {1'h0, a_first_counter} - 13'h1; // @[Edges.scala:229:27, :230:28] wire [11:0] a_first_counter1 = _a_first_counter1_T[11:0]; // @[Edges.scala:230:28] wire a_first = a_first_counter == 12'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T = a_first_counter == 12'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_1 = a_first_beats1 == 12'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35] wire [11:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27] wire [11:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [11:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [3:0] size; // @[Monitor.scala:389:22] reg [31:0] address; // @[Monitor.scala:391:22] wire _T_1154 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35] wire _d_first_T; // @[Decoupled.scala:51:35] assign _d_first_T = _T_1154; // @[Decoupled.scala:51:35] wire _d_first_T_1; // @[Decoupled.scala:51:35] assign _d_first_T_1 = _T_1154; // @[Decoupled.scala:51:35] wire _d_first_T_2; // @[Decoupled.scala:51:35] assign _d_first_T_2 = _T_1154; // @[Decoupled.scala:51:35] wire [26:0] _GEN_0 = 27'hFFF << io_in_d_bits_size_0; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T; // @[package.scala:243:71] assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71] assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71] assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71] wire [11:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [11:0] d_first_beats1_decode = _d_first_beats1_decode_T_2; // @[package.scala:243:46] wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire [11:0] d_first_beats1 = d_first_beats1_opdata ? 
d_first_beats1_decode : 12'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [11:0] d_first_counter; // @[Edges.scala:229:27] wire [12:0] _d_first_counter1_T = {1'h0, d_first_counter} - 13'h1; // @[Edges.scala:229:27, :230:28] wire [11:0] d_first_counter1 = _d_first_counter1_T[11:0]; // @[Edges.scala:230:28] wire d_first = d_first_counter == 12'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T = d_first_counter == 12'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_1 = d_first_beats1 == 12'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35] wire [11:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27] wire [11:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [11:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] param_1; // @[Monitor.scala:539:22] reg [3:0] size_1; // @[Monitor.scala:540:22] reg source_1; // @[Monitor.scala:541:22] reg sink; // @[Monitor.scala:542:22] reg denied; // @[Monitor.scala:543:22] reg [1:0] inflight; // @[Monitor.scala:614:27] reg [3:0] inflight_opcodes; // @[Monitor.scala:616:35] wire [3:0] _a_opcode_lookup_T_1 = inflight_opcodes; // @[Monitor.scala:616:35, :637:44] reg [7:0] inflight_sizes; // @[Monitor.scala:618:33] wire [7:0] _a_size_lookup_T_1 = inflight_sizes; // @[Monitor.scala:618:33, :641:40] wire [11:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [11:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5; // @[package.scala:243:46] wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}] wire [11:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 12'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [11:0] a_first_counter_1; // @[Edges.scala:229:27] wire [12:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 13'h1; // @[Edges.scala:229:27, :230:28] wire [11:0] a_first_counter1_1 = _a_first_counter1_T_1[11:0]; // @[Edges.scala:230:28] wire a_first_1 = a_first_counter_1 == 12'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T_2 = a_first_counter_1 == 12'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_3 = a_first_beats1_1 == 12'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35] wire [11:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [11:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [11:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [11:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [11:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5; // @[package.scala:243:46] wire [11:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? 
d_first_beats1_decode_1 : 12'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [11:0] d_first_counter_1; // @[Edges.scala:229:27] wire [12:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 13'h1; // @[Edges.scala:229:27, :230:28] wire [11:0] d_first_counter1_1 = _d_first_counter1_T_1[11:0]; // @[Edges.scala:230:28] wire d_first_1 = d_first_counter_1 == 12'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_2 = d_first_counter_1 == 12'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_3 = d_first_beats1_1 == 12'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35] wire [11:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [11:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [11:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire a_set; // @[Monitor.scala:626:34] wire a_set_wo_ready; // @[Monitor.scala:627:34] wire [3:0] a_opcodes_set; // @[Monitor.scala:630:33] wire [7:0] a_sizes_set; // @[Monitor.scala:632:31] wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35] wire [15:0] _a_opcode_lookup_T_6 = {12'h0, _a_opcode_lookup_T_1}; // @[Monitor.scala:637:{44,97}] wire [15:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[15:1]}; // @[Monitor.scala:637:{97,152}] assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}] wire [7:0] a_size_lookup; // @[Monitor.scala:639:33] wire [15:0] _a_size_lookup_T_6 = {8'h0, _a_size_lookup_T_1}; // @[Monitor.scala:641:{40,91}] wire [15:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[15:1]}; // @[Monitor.scala:641:{91,144}] assign a_size_lookup = _a_size_lookup_T_7[7:0]; // @[Monitor.scala:639:33, :641:{19,144}] wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40] wire [4:0] a_sizes_set_interm; // @[Monitor.scala:648:38] wire _T_1004 = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26] assign a_set_wo_ready = _T_1004; // @[Monitor.scala:627:34, :651:26] wire _same_cycle_resp_T; // @[Monitor.scala:684:44] assign _same_cycle_resp_T = _T_1004; // @[Monitor.scala:651:26, :684:44] assign a_set = _T_1081 & a_first_1; // @[Decoupled.scala:51:35] wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53] wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}] assign a_opcodes_set_interm = a_set ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:626:34, :646:40, :655:70, :657:{28,61}] wire [4:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51] wire [4:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[4:1], 1'h1}; // @[Monitor.scala:658:{51,59}] assign a_sizes_set_interm = a_set ? _a_sizes_set_interm_T_1 : 5'h0; // @[Monitor.scala:626:34, :648:38, :655:70, :658:{28,59}] wire [18:0] _a_opcodes_set_T_1 = {15'h0, a_opcodes_set_interm}; // @[Monitor.scala:646:40, :659:54] assign a_opcodes_set = a_set ? _a_opcodes_set_T_1[3:0] : 4'h0; // @[Monitor.scala:626:34, :630:33, :655:70, :659:{28,54}] wire [19:0] _a_sizes_set_T_1 = {15'h0, a_sizes_set_interm}; // @[Monitor.scala:648:38, :660:52] assign a_sizes_set = a_set ? 
_a_sizes_set_T_1[7:0] : 8'h0; // @[Monitor.scala:626:34, :632:31, :655:70, :660:{28,52}] wire d_clr; // @[Monitor.scala:664:34] wire d_clr_wo_ready; // @[Monitor.scala:665:34] wire [3:0] d_opcodes_clr; // @[Monitor.scala:668:33] wire [7:0] d_sizes_clr; // @[Monitor.scala:670:31] wire _GEN_1 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46] wire d_release_ack; // @[Monitor.scala:673:46] assign d_release_ack = _GEN_1; // @[Monitor.scala:673:46] wire d_release_ack_1; // @[Monitor.scala:783:46] assign d_release_ack_1 = _GEN_1; // @[Monitor.scala:673:46, :783:46] wire _T_1053 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26] assign d_clr_wo_ready = _T_1053 & ~d_release_ack; // @[Monitor.scala:665:34, :673:46, :674:{26,71,74}] assign d_clr = _T_1154 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35] assign d_opcodes_clr = {4{d_clr}}; // @[Monitor.scala:664:34, :668:33, :678:89, :680:21] assign d_sizes_clr = {8{d_clr}}; // @[Monitor.scala:664:34, :670:31, :678:89, :681:21] wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}] wire same_cycle_resp = _same_cycle_resp_T_1; // @[Monitor.scala:684:{55,88}] wire [1:0] _inflight_T = {inflight[1], inflight[0] | a_set}; // @[Monitor.scala:614:27, :626:34, :705:27] wire _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38] wire [1:0] _inflight_T_2 = {1'h0, _inflight_T[0] & _inflight_T_1}; // @[Monitor.scala:705:{27,36,38}] wire [3:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43] wire [3:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62] wire [3:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}] wire [7:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39] wire [7:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56] wire [7:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}] reg [31:0] watchdog; // @[Monitor.scala:709:27] wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26] wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26] reg [1:0] inflight_1; // @[Monitor.scala:726:35] wire [1:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35] reg [3:0] inflight_opcodes_1; // @[Monitor.scala:727:35] wire [3:0] _c_opcode_lookup_T_1 = inflight_opcodes_1; // @[Monitor.scala:727:35, :749:44] wire [3:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43] reg [7:0] inflight_sizes_1; // @[Monitor.scala:728:35] wire [7:0] _c_size_lookup_T_1 = inflight_sizes_1; // @[Monitor.scala:728:35, :750:42] wire [7:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41] wire [11:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}] wire [11:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8; // @[package.scala:243:46] wire [11:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? 
d_first_beats1_decode_2 : 12'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [11:0] d_first_counter_2; // @[Edges.scala:229:27] wire [12:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 13'h1; // @[Edges.scala:229:27, :230:28] wire [11:0] d_first_counter1_2 = _d_first_counter1_T_2[11:0]; // @[Edges.scala:230:28] wire d_first_2 = d_first_counter_2 == 12'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_4 = d_first_counter_2 == 12'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_5 = d_first_beats1_2 == 12'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}] wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35] wire [11:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27] wire [11:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}] wire [11:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35] wire [7:0] c_size_lookup; // @[Monitor.scala:748:35] wire [15:0] _c_opcode_lookup_T_6 = {12'h0, _c_opcode_lookup_T_1}; // @[Monitor.scala:749:{44,97}] wire [15:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[15:1]}; // @[Monitor.scala:749:{97,152}] assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}] wire [15:0] _c_size_lookup_T_6 = {8'h0, _c_size_lookup_T_1}; // @[Monitor.scala:750:{42,93}] wire [15:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[15:1]}; // @[Monitor.scala:750:{93,146}] assign c_size_lookup = _c_size_lookup_T_7[7:0]; // @[Monitor.scala:748:35, :750:{21,146}] wire d_clr_1; // @[Monitor.scala:774:34] wire d_clr_wo_ready_1; // @[Monitor.scala:775:34] wire [3:0] d_opcodes_clr_1; // @[Monitor.scala:776:34] wire [7:0] d_sizes_clr_1; // @[Monitor.scala:777:34] wire _T_1125 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26] assign d_clr_wo_ready_1 = _T_1125 & d_release_ack_1; // @[Monitor.scala:775:34, :783:46, :784:{26,71}] assign d_clr_1 = _T_1154 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35] assign d_opcodes_clr_1 = {4{d_clr_1}}; // @[Monitor.scala:774:34, :776:34, :788:88, :790:21] assign d_sizes_clr_1 = {8{d_clr_1}}; // @[Monitor.scala:774:34, :777:34, :788:88, :791:21] wire _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46] wire [1:0] _inflight_T_5 = {1'h0, _inflight_T_3[0] & _inflight_T_4}; // @[Monitor.scala:814:{35,44,46}] wire [3:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62] wire [3:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}] wire [7:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58] wire [7:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
Generate the Verilog code corresponding to the following Chisel files. File InputUnit.scala:
package constellation.router

import chisel3._
import chisel3.util._

import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._

import constellation.channel._
import constellation.routing.{FlowRoutingBundle}
import constellation.noc.{HasNoCParams}

class AbstractInputUnitIO(
  val cParam: BaseChannelParams,
  val outParams: Seq[ChannelParams],
  val egressParams: Seq[EgressChannelParams],
)(implicit val p: Parameters) extends Bundle with HasRouterOutputParams {
  val nodeId = cParam.destId

  val router_req = Decoupled(new RouteComputerReq)
  val router_resp = Input(new RouteComputerResp(outParams, egressParams))

  val vcalloc_req = Decoupled(new VCAllocReq(cParam, outParams, egressParams))
  val vcalloc_resp = Input(new VCAllocResp(outParams, egressParams))

  val out_credit_available = Input(MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) }))

  val salloc_req = Vec(cParam.destSpeedup, Decoupled(new SwitchAllocReq(outParams, egressParams)))

  val out = Vec(cParam.destSpeedup, Valid(new SwitchBundle(outParams, egressParams)))
  val debug = Output(new Bundle {
    val va_stall = UInt(log2Ceil(cParam.nVirtualChannels).W)
    val sa_stall = UInt(log2Ceil(cParam.nVirtualChannels).W)
  })
  val block = Input(Bool())
}

abstract class AbstractInputUnit(
  val cParam: BaseChannelParams,
  val outParams: Seq[ChannelParams],
  val egressParams: Seq[EgressChannelParams]
)(implicit val p: Parameters) extends Module with HasRouterOutputParams with HasNoCParams {
  val nodeId = cParam.destId

  def io: AbstractInputUnitIO
}

class InputBuffer(cParam: ChannelParams)(implicit p: Parameters) extends Module {
  val nVirtualChannels = cParam.nVirtualChannels
  val io = IO(new Bundle {
    val enq = Flipped(Vec(cParam.srcSpeedup, Valid(new Flit(cParam.payloadBits))))
    val deq = Vec(cParam.nVirtualChannels, Decoupled(new BaseFlit(cParam.payloadBits)))
  })

  val useOutputQueues = cParam.useOutputQueues
  val delims = if (useOutputQueues) {
    cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize else 0).scanLeft(0)(_+_)
  } else {
    // If no queuing, have to add an additional slot since head == tail implies empty
    // TODO this should be fixed, should use all slots available
    cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize + 1 else 0).scanLeft(0)(_+_)
  }
  val starts = delims.dropRight(1).zipWithIndex.map { case (s,i) =>
    if (cParam.virtualChannelParams(i).traversable) s else 0
  }
  val ends = delims.tail.zipWithIndex.map { case (s,i) =>
    if (cParam.virtualChannelParams(i).traversable) s else 0
  }
  val fullSize = delims.last

  // Ugly case. Use multiple queues
  if ((cParam.srcSpeedup > 1 || cParam.destSpeedup > 1 || fullSize <= 1) || !cParam.unifiedBuffer) {
    require(useOutputQueues)
    val qs = cParam.virtualChannelParams.map(v => Module(new Queue(new BaseFlit(cParam.payloadBits), v.bufferSize)))
    qs.zipWithIndex.foreach { case (q,i) =>
      val sel = io.enq.map(f => f.valid && f.bits.virt_channel_id === i.U)
      q.io.enq.valid := sel.orR
      q.io.enq.bits.head := Mux1H(sel, io.enq.map(_.bits.head))
      q.io.enq.bits.tail := Mux1H(sel, io.enq.map(_.bits.tail))
      q.io.enq.bits.payload := Mux1H(sel, io.enq.map(_.bits.payload))
      io.deq(i) <> q.io.deq
    }
  } else {
    val mem = Mem(fullSize, new BaseFlit(cParam.payloadBits))
    val heads = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W))))
    val tails = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W))))
    val empty = (heads zip tails).map(t => t._1 === t._2)
    val qs = Seq.fill(nVirtualChannels) { Module(new Queue(new BaseFlit(cParam.payloadBits), 1, pipe=true)) }
    qs.foreach(_.io.enq.valid := false.B)
    qs.foreach(_.io.enq.bits := DontCare)
    val vc_sel = UIntToOH(io.enq(0).bits.virt_channel_id)
    val flit = Wire(new BaseFlit(cParam.payloadBits))
    val direct_to_q = (Mux1H(vc_sel, qs.map(_.io.enq.ready)) && Mux1H(vc_sel, empty)) && useOutputQueues.B
    flit.head := io.enq(0).bits.head
    flit.tail := io.enq(0).bits.tail
    flit.payload := io.enq(0).bits.payload
    when (io.enq(0).valid && !direct_to_q) {
      val tail = tails(io.enq(0).bits.virt_channel_id)
      mem.write(tail, flit)
      tails(io.enq(0).bits.virt_channel_id) := Mux(
        tail === Mux1H(vc_sel, ends.map(_ - 1).map(_ max 0).map(_.U)),
        Mux1H(vc_sel, starts.map(_.U)),
        tail + 1.U)
    } .elsewhen (io.enq(0).valid && direct_to_q) {
      for (i <- 0 until nVirtualChannels) {
        when (io.enq(0).bits.virt_channel_id === i.U) {
          qs(i).io.enq.valid := true.B
          qs(i).io.enq.bits := flit
        }
      }
    }

    if (useOutputQueues) {
      val can_to_q = (0 until nVirtualChannels).map { i => !empty(i) && qs(i).io.enq.ready }
      val to_q_oh = PriorityEncoderOH(can_to_q)
      val to_q = OHToUInt(to_q_oh)
      when (can_to_q.orR) {
        val head = Mux1H(to_q_oh, heads)
        heads(to_q) := Mux(
          head === Mux1H(to_q_oh, ends.map(_ - 1).map(_ max 0).map(_.U)),
          Mux1H(to_q_oh, starts.map(_.U)),
          head + 1.U)
        for (i <- 0 until nVirtualChannels) {
          when (to_q_oh(i)) {
            qs(i).io.enq.valid := true.B
            qs(i).io.enq.bits := mem.read(head)
          }
        }
      }
      for (i <- 0 until nVirtualChannels) {
        io.deq(i) <> qs(i).io.deq
      }
    } else {
      qs.map(_.io.deq.ready := false.B)
      val ready_sel = io.deq.map(_.ready)
      val fire = io.deq.map(_.fire)
      assert(PopCount(fire) <= 1.U)
      val head = Mux1H(fire, heads)
      when (fire.orR) {
        val fire_idx = OHToUInt(fire)
        heads(fire_idx) := Mux(
          head === Mux1H(fire, ends.map(_ - 1).map(_ max 0).map(_.U)),
          Mux1H(fire, starts.map(_.U)),
          head + 1.U)
      }
      val read_flit = mem.read(head)
      for (i <- 0 until nVirtualChannels) {
        io.deq(i).valid := !empty(i)
        io.deq(i).bits := read_flit
      }
    }
  }
}

class InputUnit(cParam: ChannelParams, outParams: Seq[ChannelParams],
  egressParams: Seq[EgressChannelParams],
  combineRCVA: Boolean, combineSAST: Boolean
)
  (implicit p: Parameters) extends AbstractInputUnit(cParam, outParams, egressParams)(p) {
  val nVirtualChannels = cParam.nVirtualChannels
  val virtualChannelParams = cParam.virtualChannelParams

  class InputUnitIO extends AbstractInputUnitIO(cParam, outParams, egressParams) {
    val in = Flipped(new Channel(cParam.asInstanceOf[ChannelParams]))
  }
  val io = IO(new InputUnitIO)

  val g_i :: g_r :: g_v :: g_a :: g_c :: Nil = Enum(5)

  class InputState extends Bundle {
    val g = UInt(3.W)
    val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })
    val flow = new FlowRoutingBundle
    val fifo_deps = UInt(nVirtualChannels.W)
  }

  val input_buffer = Module(new InputBuffer(cParam))
  for (i <- 0 until cParam.srcSpeedup) {
    input_buffer.io.enq(i) := io.in.flit(i)
  }
  input_buffer.io.deq.foreach(_.ready := false.B)

  val route_arbiter = Module(new Arbiter(
    new RouteComputerReq, nVirtualChannels
  ))
  io.router_req <> route_arbiter.io.out

  val states = Reg(Vec(nVirtualChannels, new InputState))
  val anyFifo = cParam.possibleFlows.map(_.fifo).reduce(_||_)
  val allFifo = cParam.possibleFlows.map(_.fifo).reduce(_&&_)
  if (anyFifo) {
    val idle_mask = VecInit(states.map(_.g === g_i)).asUInt
    for (s <- states)
      for (i <- 0 until nVirtualChannels)
        s.fifo_deps := s.fifo_deps & ~idle_mask
  }

  for (i <- 0 until cParam.srcSpeedup) {
    when (io.in.flit(i).fire && io.in.flit(i).bits.head) {
      val id = io.in.flit(i).bits.virt_channel_id
      assert(id < nVirtualChannels.U)
      assert(states(id).g === g_i)
      val at_dest = io.in.flit(i).bits.flow.egress_node === nodeId.U
      states(id).g := Mux(at_dest, g_v, g_r)
      states(id).vc_sel.foreach(_.foreach(_ := false.B))
      for (o <- 0 until nEgress) {
        when (o.U === io.in.flit(i).bits.flow.egress_node_id) {
          states(id).vc_sel(o+nOutputs)(0) := true.B
        }
      }
      states(id).flow := io.in.flit(i).bits.flow
      if (anyFifo) {
        val fifo = cParam.possibleFlows.filter(_.fifo).map(_.isFlow(io.in.flit(i).bits.flow)).toSeq.orR
        states(id).fifo_deps := VecInit(states.zipWithIndex.map { case (s, j) =>
          s.g =/= g_i && s.flow.asUInt === io.in.flit(i).bits.flow.asUInt && j.U =/= id
        }).asUInt
      }
    }
  }

  (route_arbiter.io.in zip states).zipWithIndex.map { case ((i,s),idx) =>
    if (virtualChannelParams(idx).traversable) {
      i.valid := s.g === g_r
      i.bits.flow := s.flow
      i.bits.src_virt_id := idx.U
      when (i.fire) { s.g := g_v }
    } else {
      i.valid := false.B
      i.bits := DontCare
    }
  }

  when (io.router_req.fire) {
    val id = io.router_req.bits.src_virt_id
    assert(states(id).g === g_r)
    states(id).g := g_v
    for (i <- 0 until nVirtualChannels) {
      when (i.U === id) {
        states(i).vc_sel := io.router_resp.vc_sel
      }
    }
  }

  val mask = RegInit(0.U(nVirtualChannels.W))
  val vcalloc_reqs = Wire(Vec(nVirtualChannels, new VCAllocReq(cParam, outParams, egressParams)))
  val vcalloc_vals = Wire(Vec(nVirtualChannels, Bool()))
  val vcalloc_filter = PriorityEncoderOH(Cat(vcalloc_vals.asUInt, vcalloc_vals.asUInt & ~mask))
  val vcalloc_sel = vcalloc_filter(nVirtualChannels-1,0) | (vcalloc_filter >> nVirtualChannels)
  // Prioritize incoming packets
  when (io.router_req.fire) {
    mask := (1.U << io.router_req.bits.src_virt_id) - 1.U
  } .elsewhen (vcalloc_vals.orR) {
    mask := Mux1H(vcalloc_sel, (0 until nVirtualChannels).map { w => ~(0.U((w+1).W)) })
  }
  io.vcalloc_req.valid := vcalloc_vals.orR
  io.vcalloc_req.bits := Mux1H(vcalloc_sel, vcalloc_reqs)

  states.zipWithIndex.map { case (s,idx) =>
    if (virtualChannelParams(idx).traversable) {
      vcalloc_vals(idx) := s.g === g_v && s.fifo_deps === 0.U
      vcalloc_reqs(idx).in_vc := idx.U
      vcalloc_reqs(idx).vc_sel := s.vc_sel
      vcalloc_reqs(idx).flow := s.flow
      when (vcalloc_vals(idx) && vcalloc_sel(idx) && io.vcalloc_req.ready) { s.g := g_a }
      if (combineRCVA) {
        when (route_arbiter.io.in(idx).fire) {
          vcalloc_vals(idx) := true.B
          vcalloc_reqs(idx).vc_sel := io.router_resp.vc_sel
        }
      }
    } else {
      vcalloc_vals(idx) := false.B
      vcalloc_reqs(idx) := DontCare
    }
  }

  io.debug.va_stall := PopCount(vcalloc_vals) - io.vcalloc_req.ready

  when (io.vcalloc_req.fire) {
    for (i <- 0 until nVirtualChannels) {
      when (vcalloc_sel(i)) {
        states(i).vc_sel := io.vcalloc_resp.vc_sel
        states(i).g := g_a
        if (!combineRCVA) {
          assert(states(i).g === g_v)
        }
      }
    }
  }

  val salloc_arb = Module(new SwitchArbiter(
    nVirtualChannels,
    cParam.destSpeedup,
    outParams,
    egressParams
  ))

  (states zip salloc_arb.io.in).zipWithIndex.map { case ((s,r),i) =>
    if (virtualChannelParams(i).traversable) {
      val credit_available = (s.vc_sel.asUInt & io.out_credit_available.asUInt) =/= 0.U
      r.valid := s.g === g_a && credit_available && input_buffer.io.deq(i).valid
      r.bits.vc_sel := s.vc_sel
      val deq_tail = input_buffer.io.deq(i).bits.tail
      r.bits.tail := deq_tail
      when (r.fire && deq_tail) {
        s.g := g_i
      }
      input_buffer.io.deq(i).ready := r.ready
    } else {
      r.valid := false.B
      r.bits := DontCare
    }
  }
  io.debug.sa_stall := PopCount(salloc_arb.io.in.map(r => r.valid && !r.ready))
  io.salloc_req <> salloc_arb.io.out
  when (io.block) {
    salloc_arb.io.out.foreach(_.ready := false.B)
    io.salloc_req.foreach(_.valid := false.B)
  }

  class OutBundle extends Bundle {
    val valid = Bool()
    val vid = UInt(virtualChannelBits.W)
    val out_vid = UInt(log2Up(allOutParams.map(_.nVirtualChannels).max).W)
    val flit = new Flit(cParam.payloadBits)
  }

  val salloc_outs = if (combineSAST) {
    Wire(Vec(cParam.destSpeedup, new OutBundle))
  } else {
    Reg(Vec(cParam.destSpeedup, new OutBundle))
  }

  io.in.credit_return := salloc_arb.io.out.zipWithIndex.map { case (o, i) =>
    Mux(o.fire, salloc_arb.io.chosen_oh(i), 0.U)
  }.reduce(_|_)
  io.in.vc_free := salloc_arb.io.out.zipWithIndex.map { case (o, i) =>
    Mux(o.fire && Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail)),
      salloc_arb.io.chosen_oh(i), 0.U)
  }.reduce(_|_)

  for (i <- 0 until cParam.destSpeedup) {
    val salloc_out = salloc_outs(i)
    salloc_out.valid := salloc_arb.io.out(i).fire
    salloc_out.vid := OHToUInt(salloc_arb.io.chosen_oh(i))
    val vc_sel = Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.vc_sel))
    val channel_oh = vc_sel.map(_.reduce(_||_)).toSeq
    val virt_channel = Mux1H(channel_oh, vc_sel.map(v => OHToUInt(v)).toSeq)
    when (salloc_arb.io.out(i).fire) {
      salloc_out.out_vid := virt_channel
      salloc_out.flit.payload := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.payload))
      salloc_out.flit.head := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.head))
      salloc_out.flit.tail := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail))
      salloc_out.flit.flow := Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.flow))
    } .otherwise {
      salloc_out.out_vid := DontCare
      salloc_out.flit := DontCare
    }
    salloc_out.flit.virt_channel_id := DontCare // this gets set in the switch

    io.out(i).valid := salloc_out.valid
    io.out(i).bits.flit := salloc_out.flit
    io.out(i).bits.out_virt_channel := salloc_out.out_vid
  }

  def filterVCSel(sel: MixedVec[Vec[Bool]], srcV: Int) = {
    if (virtualChannelParams(srcV).traversable) {
      outParams.zipWithIndex.map { case (oP, oI) =>
        (0 until oP.nVirtualChannels).map { oV =>
          var allow = false
          virtualChannelParams(srcV).possibleFlows.foreach { pI =>
            allow = allow || routingRelation(
              cParam.channelRoutingInfos(srcV),
              oP.channelRoutingInfos(oV),
              pI
            )
          }
          if (!allow)
            sel(oI)(oV) := false.B
        }
      }
    }
  }

  (0 until nVirtualChannels).map { i =>
    if (!virtualChannelParams(i).traversable) states(i) := DontCare
    filterVCSel(states(i).vc_sel, i)
  }

  when (reset.asBool) {
    states.foreach(_.g := g_i)
  }
}
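The InputUnit above walks each virtual channel through a small allocation state machine (g_i idle, g_r route computation, g_v VC allocation, g_a switch allocation; g_c is declared but never assigned in this version), and that machine is what appears in the generated module below as the 3-bit states_*_g registers compared against constants such as 3'h1, the g_r encoding. The following stand-alone sketch, with hypothetical module and port names that are not part of the generated output, shows the same head-flit-driven progression in isolation:

// Minimal, self-contained sketch of the per-VC grant state machine used by
// InputUnit. Module and port names here are illustrative only.
import chisel3._
import chisel3.util._

class VCStateSketch extends Module {
  val io = IO(new Bundle {
    val head_fire    = Input(Bool())  // head flit accepted into this VC
    val route_fire   = Input(Bool())  // route computer granted the request
    val vcalloc_fire = Input(Bool())  // VC allocator granted the request
    val tail_fire    = Input(Bool())  // tail flit won switch allocation
    val state        = Output(UInt(3.W))
  })

  // Same encoding order as InputUnit: idle, route, vc-alloc, switch-alloc, (credit)
  val g_i :: g_r :: g_v :: g_a :: g_c :: Nil = Enum(5)
  val g = RegInit(g_i)

  switch (g) {
    is (g_i) { when (io.head_fire)    { g := g_r } }  // head flit arrives
    is (g_r) { when (io.route_fire)   { g := g_v } }  // route computed
    is (g_v) { when (io.vcalloc_fire) { g := g_a } }  // output VC granted
    is (g_a) { when (io.tail_fire)    { g := g_i } }  // tail flit sent, VC freed
  }
  io.state := g
}

In the real InputUnit the return to idle is the transition when (r.fire && deq_tail) { s.g := g_i } in the switch-allocation hookup, and a packet already at its egress node skips g_r entirely via Mux(at_dest, g_v, g_r).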
module InputUnit_18( // @[InputUnit.scala:158:7] input clock, // @[InputUnit.scala:158:7] input reset, // @[InputUnit.scala:158:7] output [1:0] io_router_req_bits_src_virt_id, // @[InputUnit.scala:170:14] output [1:0] io_router_req_bits_flow_vnet_id, // @[InputUnit.scala:170:14] output [3:0] io_router_req_bits_flow_ingress_node, // @[InputUnit.scala:170:14] output [2:0] io_router_req_bits_flow_ingress_node_id, // @[InputUnit.scala:170:14] output [3:0] io_router_req_bits_flow_egress_node, // @[InputUnit.scala:170:14] output [1:0] io_router_req_bits_flow_egress_node_id, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_3_0, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_3_1, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_3_2, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_2_0, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_2_1, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_2_2, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_1_0, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_1_1, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_1_2, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_0_0, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_0_1, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_0_2, // @[InputUnit.scala:170:14] input io_vcalloc_req_ready, // @[InputUnit.scala:170:14] output io_vcalloc_req_valid, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_5_0, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_4_0, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_3_0, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_3_1, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_3_2, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_2_0, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_2_1, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_2_2, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_1_0, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_1_1, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_1_2, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_0_0, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_0_1, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_0_2, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_5_0, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_4_0, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_3_0, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_3_1, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_3_2, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_2_0, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_0_0, // @[InputUnit.scala:170:14] input io_out_credit_available_5_0, // @[InputUnit.scala:170:14] input io_out_credit_available_4_0, // @[InputUnit.scala:170:14] input io_out_credit_available_3_0, // @[InputUnit.scala:170:14] input io_out_credit_available_3_1, // @[InputUnit.scala:170:14] input io_out_credit_available_3_2, // @[InputUnit.scala:170:14] input io_out_credit_available_2_0, // @[InputUnit.scala:170:14] input io_out_credit_available_1_0, // @[InputUnit.scala:170:14] input io_out_credit_available_1_1, // @[InputUnit.scala:170:14] input io_out_credit_available_1_2, // @[InputUnit.scala:170:14] input io_out_credit_available_0_0, // @[InputUnit.scala:170:14] input io_salloc_req_0_ready, // @[InputUnit.scala:170:14] 
output io_salloc_req_0_valid, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_5_0, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_4_0, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_3_0, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_3_1, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_3_2, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_2_0, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_2_1, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_2_2, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_0, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_1, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_2, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_0, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_1, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_2, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_tail, // @[InputUnit.scala:170:14] output io_out_0_valid, // @[InputUnit.scala:170:14] output io_out_0_bits_flit_head, // @[InputUnit.scala:170:14] output io_out_0_bits_flit_tail, // @[InputUnit.scala:170:14] output [144:0] io_out_0_bits_flit_payload, // @[InputUnit.scala:170:14] output [1:0] io_out_0_bits_flit_flow_vnet_id, // @[InputUnit.scala:170:14] output [3:0] io_out_0_bits_flit_flow_ingress_node, // @[InputUnit.scala:170:14] output [2:0] io_out_0_bits_flit_flow_ingress_node_id, // @[InputUnit.scala:170:14] output [3:0] io_out_0_bits_flit_flow_egress_node, // @[InputUnit.scala:170:14] output [1:0] io_out_0_bits_flit_flow_egress_node_id, // @[InputUnit.scala:170:14] output [1:0] io_out_0_bits_out_virt_channel, // @[InputUnit.scala:170:14] output [1:0] io_debug_va_stall, // @[InputUnit.scala:170:14] output [1:0] io_debug_sa_stall, // @[InputUnit.scala:170:14] input io_in_flit_0_valid, // @[InputUnit.scala:170:14] input io_in_flit_0_bits_head, // @[InputUnit.scala:170:14] input io_in_flit_0_bits_tail, // @[InputUnit.scala:170:14] input [144:0] io_in_flit_0_bits_payload, // @[InputUnit.scala:170:14] input [1:0] io_in_flit_0_bits_flow_vnet_id, // @[InputUnit.scala:170:14] input [3:0] io_in_flit_0_bits_flow_ingress_node, // @[InputUnit.scala:170:14] input [2:0] io_in_flit_0_bits_flow_ingress_node_id, // @[InputUnit.scala:170:14] input [3:0] io_in_flit_0_bits_flow_egress_node, // @[InputUnit.scala:170:14] input [1:0] io_in_flit_0_bits_flow_egress_node_id, // @[InputUnit.scala:170:14] input [1:0] io_in_flit_0_bits_virt_channel_id, // @[InputUnit.scala:170:14] output [2:0] io_in_credit_return, // @[InputUnit.scala:170:14] output [2:0] io_in_vc_free // @[InputUnit.scala:170:14] ); wire _GEN; // @[MixedVec.scala:116:9] wire _GEN_0; // @[MixedVec.scala:116:9] wire vcalloc_reqs_2_vc_sel_3_2; // @[MixedVec.scala:116:9] wire vcalloc_vals_2; // @[InputUnit.scala:266:25, :272:46, :273:29] wire _GEN_1; // @[MixedVec.scala:116:9] wire _GEN_2; // @[MixedVec.scala:116:9] wire vcalloc_reqs_1_vc_sel_3_1; // @[MixedVec.scala:116:9] wire vcalloc_vals_1; // @[InputUnit.scala:266:25, :272:46, :273:29] wire _GEN_3; // @[MixedVec.scala:116:9] wire _GEN_4; // @[MixedVec.scala:116:9] wire vcalloc_reqs_0_vc_sel_3_0; // @[MixedVec.scala:116:9] wire vcalloc_reqs_0_vc_sel_2_0; // @[MixedVec.scala:116:9] wire vcalloc_reqs_0_vc_sel_0_0; // @[MixedVec.scala:116:9] wire vcalloc_vals_0; // @[InputUnit.scala:266:25, :272:46, :273:29] wire _salloc_arb_io_in_0_ready; // 
@[InputUnit.scala:296:26] wire _salloc_arb_io_in_1_ready; // @[InputUnit.scala:296:26] wire _salloc_arb_io_in_2_ready; // @[InputUnit.scala:296:26] wire _salloc_arb_io_out_0_valid; // @[InputUnit.scala:296:26] wire [2:0] _salloc_arb_io_chosen_oh_0; // @[InputUnit.scala:296:26] wire _route_arbiter_io_in_1_ready; // @[InputUnit.scala:187:29] wire _route_arbiter_io_in_2_ready; // @[InputUnit.scala:187:29] wire _route_arbiter_io_out_valid; // @[InputUnit.scala:187:29] wire [1:0] _route_arbiter_io_out_bits_src_virt_id; // @[InputUnit.scala:187:29] wire _input_buffer_io_deq_0_valid; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_0_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_0_bits_tail; // @[InputUnit.scala:181:28] wire [144:0] _input_buffer_io_deq_0_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_1_valid; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_1_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_1_bits_tail; // @[InputUnit.scala:181:28] wire [144:0] _input_buffer_io_deq_1_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_2_valid; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_2_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_2_bits_tail; // @[InputUnit.scala:181:28] wire [144:0] _input_buffer_io_deq_2_bits_payload; // @[InputUnit.scala:181:28] reg [2:0] states_0_g; // @[InputUnit.scala:192:19] reg states_0_vc_sel_5_0; // @[InputUnit.scala:192:19] reg states_0_vc_sel_4_0; // @[InputUnit.scala:192:19] reg states_0_vc_sel_3_0; // @[InputUnit.scala:192:19] reg states_0_vc_sel_2_0; // @[InputUnit.scala:192:19] reg states_0_vc_sel_0_0; // @[InputUnit.scala:192:19] reg [1:0] states_0_flow_vnet_id; // @[InputUnit.scala:192:19] reg [3:0] states_0_flow_ingress_node; // @[InputUnit.scala:192:19] reg [2:0] states_0_flow_ingress_node_id; // @[InputUnit.scala:192:19] reg [3:0] states_0_flow_egress_node; // @[InputUnit.scala:192:19] reg [1:0] states_0_flow_egress_node_id; // @[InputUnit.scala:192:19] reg [2:0] states_1_g; // @[InputUnit.scala:192:19] reg states_1_vc_sel_5_0; // @[InputUnit.scala:192:19] reg states_1_vc_sel_4_0; // @[InputUnit.scala:192:19] reg states_1_vc_sel_3_1; // @[InputUnit.scala:192:19] reg [1:0] states_1_flow_vnet_id; // @[InputUnit.scala:192:19] reg [3:0] states_1_flow_ingress_node; // @[InputUnit.scala:192:19] reg [2:0] states_1_flow_ingress_node_id; // @[InputUnit.scala:192:19] reg [3:0] states_1_flow_egress_node; // @[InputUnit.scala:192:19] reg [1:0] states_1_flow_egress_node_id; // @[InputUnit.scala:192:19] reg [2:0] states_2_g; // @[InputUnit.scala:192:19] reg states_2_vc_sel_5_0; // @[InputUnit.scala:192:19] reg states_2_vc_sel_4_0; // @[InputUnit.scala:192:19] reg states_2_vc_sel_3_2; // @[InputUnit.scala:192:19] reg [1:0] states_2_flow_vnet_id; // @[InputUnit.scala:192:19] reg [3:0] states_2_flow_ingress_node; // @[InputUnit.scala:192:19] reg [2:0] states_2_flow_ingress_node_id; // @[InputUnit.scala:192:19] reg [3:0] states_2_flow_egress_node; // @[InputUnit.scala:192:19] reg [1:0] states_2_flow_egress_node_id; // @[InputUnit.scala:192:19] wire _GEN_5 = io_in_flit_0_valid & io_in_flit_0_bits_head; // @[InputUnit.scala:205:30] wire route_arbiter_io_in_0_valid = states_0_g == 3'h1; // @[InputUnit.scala:158:7, :192:19, :229:22] wire route_arbiter_io_in_1_valid = states_1_g == 3'h1; // @[InputUnit.scala:158:7, :192:19, :229:22] wire _GEN_6 = _route_arbiter_io_in_1_ready & route_arbiter_io_in_1_valid; // @[Decoupled.scala:51:35] wire 
route_arbiter_io_in_2_valid = states_2_g == 3'h1; // @[InputUnit.scala:158:7, :192:19, :229:22] wire _GEN_7 = _route_arbiter_io_in_2_ready & route_arbiter_io_in_2_valid; // @[Decoupled.scala:51:35]
Generate the Verilog code corresponding to the following Chisel files. File Buffer.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.diplomacy.BufferParams class TLBufferNode ( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams)(implicit valName: ValName) extends TLAdapterNode( clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) }, managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) } ) { override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}" override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none) } class TLBuffer( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams)(implicit p: Parameters) extends LazyModule { def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace) def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde) def this()(implicit p: Parameters) = this(BufferParams.default) val node = new TLBufferNode(a, b, c, d, e) lazy val module = new Impl class Impl extends LazyModuleImp(this) { def headBundle = node.out.head._2.bundle override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_") (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out.a <> a(in .a) in .d <> d(out.d) if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) { in .b <> b(out.b) out.c <> c(in .c) out.e <> e(in .e) } else { in.b.valid := false.B in.c.ready := true.B in.e.ready := true.B out.b.ready := true.B out.c.valid := false.B out.e.valid := false.B } } } } object TLBuffer { def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default) def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde) def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace) def apply( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams)(implicit p: Parameters): TLNode = { val buffer = LazyModule(new TLBuffer(a, b, c, d, e)) buffer.node } def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = { val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) } name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } } buffers.map(_.node) } def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = { chain(depth, name) .reduceLeftOption(_ :*=* _) .getOrElse(TLNameNode("no_buffer")) } } File Crossing.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.interrupts import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.util.{SynchronizerShiftReg, AsyncResetReg} @deprecated("IntXing does not ensure interrupt source is glitch free. 
Use IntSyncSource and IntSyncSink", "rocket-chip 1.2") class IntXing(sync: Int = 3)(implicit p: Parameters) extends LazyModule { val intnode = IntAdapterNode() lazy val module = new Impl class Impl extends LazyModuleImp(this) { (intnode.in zip intnode.out) foreach { case ((in, _), (out, _)) => out := SynchronizerShiftReg(in, sync) } } } object IntSyncCrossingSource { def apply(alreadyRegistered: Boolean = false)(implicit p: Parameters) = { val intsource = LazyModule(new IntSyncCrossingSource(alreadyRegistered)) intsource.node } } class IntSyncCrossingSource(alreadyRegistered: Boolean = false)(implicit p: Parameters) extends LazyModule { val node = IntSyncSourceNode(alreadyRegistered) lazy val module = if (alreadyRegistered) (new ImplRegistered) else (new Impl) class Impl extends LazyModuleImp(this) { def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0) override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out.sync := AsyncResetReg(Cat(in.reverse)).asBools } } class ImplRegistered extends LazyRawModuleImp(this) { def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0) override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}_Registered" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out.sync := in } } } object IntSyncCrossingSink { @deprecated("IntSyncCrossingSink which used the `sync` parameter to determine crossing type is deprecated. Use IntSyncAsyncCrossingSink, IntSyncRationalCrossingSink, or IntSyncSyncCrossingSink instead for > 1, 1, and 0 sync values respectively", "rocket-chip 1.2") def apply(sync: Int = 3)(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync)) intsink.node } } class IntSyncAsyncCrossingSink(sync: Int = 3)(implicit p: Parameters) extends LazyModule { val node = IntSyncSinkNode(sync) lazy val module = new Impl class Impl extends LazyModuleImp(this) { override def desiredName = s"IntSyncAsyncCrossingSink_n${node.out.size}x${node.out.head._1.size}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out := SynchronizerShiftReg(in.sync, sync) } } } object IntSyncAsyncCrossingSink { def apply(sync: Int = 3)(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync)) intsink.node } } class IntSyncSyncCrossingSink()(implicit p: Parameters) extends LazyModule { val node = IntSyncSinkNode(0) lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { def outSize = node.out.headOption.map(_._1.size).getOrElse(0) override def desiredName = s"IntSyncSyncCrossingSink_n${node.out.size}x${outSize}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out := in.sync } } } object IntSyncSyncCrossingSink { def apply()(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncSyncCrossingSink()) intsink.node } } class IntSyncRationalCrossingSink()(implicit p: Parameters) extends LazyModule { val node = IntSyncSinkNode(1) lazy val module = new Impl class Impl extends LazyModuleImp(this) { def outSize = node.out.headOption.map(_._1.size).getOrElse(0) override def desiredName = s"IntSyncRationalCrossingSink_n${node.out.size}x${outSize}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out := RegNext(in.sync) } } } object IntSyncRationalCrossingSink { def apply()(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncRationalCrossingSink()) intsink.node } } File 
ClockDomain.scala: package freechips.rocketchip.prci import chisel3._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ abstract class Domain(implicit p: Parameters) extends LazyModule with HasDomainCrossing { def clockBundle: ClockBundle lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { childClock := clockBundle.clock childReset := clockBundle.reset override def provideImplicitClockToLazyChildren = true // these are just for backwards compatibility with external devices // that were manually wiring themselves to the domain's clock/reset input: val clock = IO(Output(chiselTypeOf(clockBundle.clock))) val reset = IO(Output(chiselTypeOf(clockBundle.reset))) clock := clockBundle.clock reset := clockBundle.reset } } abstract class ClockDomain(implicit p: Parameters) extends Domain with HasClockDomainCrossing class ClockSinkDomain(val clockSinkParams: ClockSinkParameters)(implicit p: Parameters) extends ClockDomain { def this(take: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSinkParameters(take = take, name = name)) val clockNode = ClockSinkNode(Seq(clockSinkParams)) def clockBundle = clockNode.in.head._1 override lazy val desiredName = (clockSinkParams.name.toSeq :+ "ClockSinkDomain").mkString } class ClockSourceDomain(val clockSourceParams: ClockSourceParameters)(implicit p: Parameters) extends ClockDomain { def this(give: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSourceParameters(give = give, name = name)) val clockNode = ClockSourceNode(Seq(clockSourceParams)) def clockBundle = clockNode.out.head._1 override lazy val desiredName = (clockSourceParams.name.toSeq :+ "ClockSourceDomain").mkString } abstract class ResetDomain(implicit p: Parameters) extends Domain with HasResetDomainCrossing File HasTiles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.subsystem import chisel3._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.bundlebridge._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.devices.debug.TLDebugModule import freechips.rocketchip.diplomacy.{DisableMonitors, FlipRendering} import freechips.rocketchip.interrupts.{IntXbar, IntSinkNode, IntSinkPortSimple, IntSyncAsyncCrossingSink} import freechips.rocketchip.tile.{MaxHartIdBits, BaseTile, InstantiableTileParams, TileParams, TilePRCIDomain, TraceBundle, PriorityMuxHartIdFromSeq} import freechips.rocketchip.tilelink.TLWidthWidget import freechips.rocketchip.prci.{ClockGroup, BundleBridgeBlockDuringReset, NoCrossing, SynchronousCrossing, CreditedCrossing, RationalCrossing, AsynchronousCrossing} import freechips.rocketchip.rocket.TracedInstruction import freechips.rocketchip.util.TraceCoreInterface import scala.collection.immutable.SortedMap /** Entry point for Config-uring the presence of Tiles */ case class TilesLocated(loc: HierarchicalLocation) extends Field[Seq[CanAttachTile]](Nil) /** List of HierarchicalLocations which might contain a Tile */ case object PossibleTileLocations extends Field[Seq[HierarchicalLocation]](Nil) /** For determining static tile id */ case object NumTiles extends Field[Int](0) /** Whether to add timing-closure registers along the path of the hart id * as it propagates through the subsystem and into the tile. 
* * These are typically only desirable when a dynamically programmable prefix is being combined * with the static hart id via [[freechips.rocketchip.subsystem.HasTiles.tileHartIdNexusNode]]. */ case object InsertTimingClosureRegistersOnHartIds extends Field[Boolean](false) /** Whether per-tile hart ids are going to be driven as inputs into a HasTiles block, * and if so, what their width should be. */ case object HasTilesExternalHartIdWidthKey extends Field[Option[Int]](None) /** Whether per-tile reset vectors are going to be driven as inputs into a HasTiles block. * * Unlike the hart ids, the reset vector width is determined by the sinks within the tiles, * based on the size of the address map visible to the tiles. */ case object HasTilesExternalResetVectorKey extends Field[Boolean](true) /** These are sources of "constants" that are driven into the tile. * * While they are not expected to change dynamically while the tile is executing code, * they may be either tied to a constant value or programmed during boot or reset. * They need to be instantiated before tiles are attached within the subsystem containing them. */ trait HasTileInputConstants { this: LazyModule with Attachable with InstantiatesHierarchicalElements => /** tileHartIdNode is used to collect publishers and subscribers of hartids. */ val tileHartIdNodes: SortedMap[Int, BundleBridgeEphemeralNode[UInt]] = (0 until nTotalTiles).map { i => (i, BundleBridgeEphemeralNode[UInt]()) }.to(SortedMap) /** tileHartIdNexusNode is a BundleBridgeNexus that collects dynamic hart prefixes. * * Each "prefix" input is actually the same full width as the outer hart id; the expected usage * is that each prefix source would set only some non-overlapping portion of the bits to non-zero values. * This node orReduces them, and further combines the reduction with the static ids assigned to each tile, * producing a unique, dynamic hart id for each tile. * * If p(InsertTimingClosureRegistersOnHartIds) is set, the input and output values are registered. * * The output values are [[dontTouch]]'d to prevent constant propagation from pulling the values into * the tiles if they are constant, which would ruin deduplication of tiles that are otherwise homogeneous. */ val tileHartIdNexusNode = LazyModule(new BundleBridgeNexus[UInt]( inputFn = BundleBridgeNexus.orReduction[UInt](registered = p(InsertTimingClosureRegistersOnHartIds)) _, outputFn = (prefix: UInt, n: Int) => Seq.tabulate(n) { i => val y = dontTouch(prefix | totalTileIdList(i).U(p(MaxHartIdBits).W)) // dontTouch to keep constant prop from breaking tile dedup if (p(InsertTimingClosureRegistersOnHartIds)) BundleBridgeNexus.safeRegNext(y) else y }, default = Some(() => 0.U(p(MaxHartIdBits).W)), inputRequiresOutput = true, // guard against this being driven but then ignored in tileHartIdIONodes below shouldBeInlined = false // can't inline something whose output we are dontTouching )).node // TODO: Replace the DebugModuleHartSelFuncs config key with logic to consume the dynamic hart IDs /** tileResetVectorNode is used to collect publishers and subscribers of tile reset vector addresses. */ val tileResetVectorNodes: SortedMap[Int, BundleBridgeEphemeralNode[UInt]] = (0 until nTotalTiles).map { i => (i, BundleBridgeEphemeralNode[UInt]()) }.to(SortedMap) /** tileResetVectorNexusNode is a BundleBridgeNexus that accepts a single reset vector source, and broadcasts it to all tiles.
*/ val tileResetVectorNexusNode = BundleBroadcast[UInt]( inputRequiresOutput = true // guard against this being driven but ignored in tileResetVectorIONodes below ) /** tileHartIdIONodes may generate subsystem IOs, one per tile, allowing the parent to assign unique hart ids. * * Or, if such IOs are not configured to exist, tileHartIdNexusNode is used to supply an id to each tile. */ val tileHartIdIONodes: Seq[BundleBridgeSource[UInt]] = p(HasTilesExternalHartIdWidthKey) match { case Some(w) => (0 until nTotalTiles).map { i => val hartIdSource = BundleBridgeSource(() => UInt(w.W)) tileHartIdNodes(i) := hartIdSource hartIdSource } case None => { (0 until nTotalTiles).map { i => tileHartIdNodes(i) :*= tileHartIdNexusNode } Nil } } /** tileResetVectorIONodes may generate subsystem IOs, one per tile, allowing the parent to assign unique reset vectors. * * Or, if such IOs are not configured to exist, tileResetVectorNexusNode is used to supply a single reset vector to every tile. */ val tileResetVectorIONodes: Seq[BundleBridgeSource[UInt]] = p(HasTilesExternalResetVectorKey) match { case true => (0 until nTotalTiles).map { i => val resetVectorSource = BundleBridgeSource[UInt]() tileResetVectorNodes(i) := resetVectorSource resetVectorSource } case false => { (0 until nTotalTiles).map { i => tileResetVectorNodes(i) :*= tileResetVectorNexusNode } Nil } } } /** These are sinks of notifications that are driven out from the tile. * * They need to be instantiated before tiles are attached to the subsystem containing them. */ trait HasTileNotificationSinks { this: LazyModule => val tileHaltXbarNode = IntXbar() val tileHaltSinkNode = IntSinkNode(IntSinkPortSimple()) tileHaltSinkNode := tileHaltXbarNode val tileWFIXbarNode = IntXbar() val tileWFISinkNode = IntSinkNode(IntSinkPortSimple()) tileWFISinkNode := tileWFIXbarNode val tileCeaseXbarNode = IntXbar() val tileCeaseSinkNode = IntSinkNode(IntSinkPortSimple()) tileCeaseSinkNode := tileCeaseXbarNode } /** Standardized interface by which parameterized tiles can be attached to contexts containing interconnect resources. * * Sub-classes of this trait can optionally override the individual connect functions in order to specialize * their attachment behaviors, but most use cases should be handled simply by changing the implementation * of the injectNode functions in crossingParams. */ trait CanAttachTile { type TileType <: BaseTile type TileContextType <: DefaultHierarchicalElementContextType def tileParams: InstantiableTileParams[TileType] def crossingParams: HierarchicalElementCrossingParamsLike /** Narrow waist through which all tiles are intended to pass while being instantiated.
*/ def instantiate(allTileParams: Seq[TileParams], instantiatedTiles: SortedMap[Int, TilePRCIDomain[_]])(implicit p: Parameters): TilePRCIDomain[TileType] = { val clockSinkParams = tileParams.clockSinkParams.copy(name = Some(tileParams.uniqueName)) val tile_prci_domain = LazyModule(new TilePRCIDomain[TileType](clockSinkParams, crossingParams) { self => val element = self.element_reset_domain { LazyModule(tileParams.instantiate(crossingParams, PriorityMuxHartIdFromSeq(allTileParams))) } }) tile_prci_domain } /** A default set of connections that need to occur for most tile types */ def connect(domain: TilePRCIDomain[TileType], context: TileContextType): Unit = { connectMasterPorts(domain, context) connectSlavePorts(domain, context) connectInterrupts(domain, context) connectPRC(domain, context) connectOutputNotifications(domain, context) connectInputConstants(domain, context) connectTrace(domain, context) } /** Connect the port where the tile is the master to a TileLink interconnect. */ def connectMasterPorts(domain: TilePRCIDomain[TileType], context: Attachable): Unit = { implicit val p = context.p val dataBus = context.locateTLBusWrapper(crossingParams.master.where) dataBus.coupleFrom(tileParams.baseName) { bus => bus :=* crossingParams.master.injectNode(context) :=* domain.crossMasterPort(crossingParams.crossingType) } } /** Connect the port where the tile is the slave to a TileLink interconnect. */ def connectSlavePorts(domain: TilePRCIDomain[TileType], context: Attachable): Unit = { implicit val p = context.p DisableMonitors { implicit p => val controlBus = context.locateTLBusWrapper(crossingParams.slave.where) controlBus.coupleTo(tileParams.baseName) { bus => domain.crossSlavePort(crossingParams.crossingType) :*= crossingParams.slave.injectNode(context) :*= TLWidthWidget(controlBus.beatBytes) :*= bus } } } /** Connect the various interrupts sent to and raised by the tile. */ def connectInterrupts(domain: TilePRCIDomain[TileType], context: TileContextType): Unit = { implicit val p = context.p // NOTE: The order of calls to := matters! They must match how interrupts // are decoded from tile.intInwardNode inside the tile. For this reason, // we stub out missing interrupts with constant sources here. // 1. Debug interrupt is definitely asynchronous in all cases. domain.element.intInwardNode := domain { IntSyncAsyncCrossingSink(3) } := context.debugNodes(domain.element.tileId) // 2. The CLINT and PLIC output interrupts are synchronous to the CLINT/PLIC respectively, // so might need to be synchronized depending on the Tile's crossing type. // From CLINT: "msip" and "mtip" context.msipDomain { domain.crossIntIn(crossingParams.crossingType, domain.element.intInwardNode) := context.msipNodes(domain.element.tileId) } // From PLIC: "meip" context.meipDomain { domain.crossIntIn(crossingParams.crossingType, domain.element.intInwardNode) := context.meipNodes(domain.element.tileId) } // From PLIC: "seip" (only if supervisor mode is enabled) if (domain.element.tileParams.core.hasSupervisorMode) { context.seipDomain { domain.crossIntIn(crossingParams.crossingType, domain.element.intInwardNode) := context.seipNodes(domain.element.tileId) } } // 3. Local Interrupts ("lip") are required to already be synchronous to the Tile's clock. // (they are connected to domain.element.intInwardNode in a separate trait) // 4. Interrupts coming out of the tile are sent to the PLIC, // so might need to be synchronized depending on the Tile's crossing type.
context.tileToPlicNodes.get(domain.element.tileId).foreach { node => FlipRendering { implicit p => domain.element.intOutwardNode.foreach { out => context.toPlicDomain { node := domain.crossIntOut(crossingParams.crossingType, out) } }} } // 5. Connect NMI inputs to the tile. These inputs are synchronous to the respective core_clock. domain.element.nmiNode.foreach(_ := context.nmiNodes(domain.element.tileId)) } /** Notifications of tile status are connected to be broadcast without needing to be clock-crossed. */ def connectOutputNotifications(domain: TilePRCIDomain[TileType], context: TileContextType): Unit = { implicit val p = context.p domain { context.tileHaltXbarNode :=* domain.crossIntOut(NoCrossing, domain.element.haltNode) context.tileWFIXbarNode :=* domain.crossIntOut(NoCrossing, domain.element.wfiNode) context.tileCeaseXbarNode :=* domain.crossIntOut(NoCrossing, domain.element.ceaseNode) } // TODO should context be forced to have a trace sink connected here? // for now this just ensures domain.trace[Core]Node has been crossed without connecting it externally } /** Connect inputs to the tile that are assumed to be constant during normal operation, and so are not clock-crossed. */ def connectInputConstants(domain: TilePRCIDomain[TileType], context: TileContextType): Unit = { implicit val p = context.p val tlBusToGetPrefixFrom = context.locateTLBusWrapper(crossingParams.mmioBaseAddressPrefixWhere) domain.element.hartIdNode := context.tileHartIdNodes(domain.element.tileId) domain.element.resetVectorNode := context.tileResetVectorNodes(domain.element.tileId) tlBusToGetPrefixFrom.prefixNode.foreach { domain.element.mmioAddressPrefixNode := _ } } /** Connect power/reset/clock resources. */ def connectPRC(domain: TilePRCIDomain[TileType], context: TileContextType): Unit = { implicit val p = context.p val tlBusToGetClockDriverFrom = context.locateTLBusWrapper(crossingParams.master.where) (crossingParams.crossingType match { case _: SynchronousCrossing | _: CreditedCrossing => if (crossingParams.forceSeparateClockReset) { domain.clockNode := tlBusToGetClockDriverFrom.clockNode } else { domain.clockNode := tlBusToGetClockDriverFrom.fixedClockNode } case _: RationalCrossing => domain.clockNode := tlBusToGetClockDriverFrom.clockNode case _: AsynchronousCrossing => { val tileClockGroup = ClockGroup() tileClockGroup := context.allClockGroupsNode domain.clockNode := tileClockGroup } }) domain { domain.element_reset_domain.clockNode := crossingParams.resetCrossingType.injectClockNode := domain.clockNode } } /** Function to handle all trace crossings when tile is instantiated inside domains */ def connectTrace(domain: TilePRCIDomain[TileType], context: TileContextType): Unit = { implicit val p = context.p val traceCrossingNode = BundleBridgeBlockDuringReset[TraceBundle]( resetCrossingType = crossingParams.resetCrossingType) context.traceNodes(domain.element.tileId) := traceCrossingNode := domain.element.traceNode val traceCoreCrossingNode = BundleBridgeBlockDuringReset[TraceCoreInterface]( resetCrossingType = crossingParams.resetCrossingType) context.traceCoreNodes(domain.element.tileId) :*= traceCoreCrossingNode := domain.element.traceCoreNode } } case class CloneTileAttachParams( sourceTileId: Int, cloneParams: CanAttachTile ) extends CanAttachTile { type TileType = cloneParams.TileType type TileContextType = cloneParams.TileContextType def tileParams = cloneParams.tileParams def crossingParams = cloneParams.crossingParams override def instantiate(allTileParams: Seq[TileParams], instantiatedTiles: 
SortedMap[Int, TilePRCIDomain[_]])(implicit p: Parameters): TilePRCIDomain[TileType] = { require(instantiatedTiles.contains(sourceTileId)) val clockSinkParams = tileParams.clockSinkParams.copy(name = Some(tileParams.uniqueName)) val tile_prci_domain = CloneLazyModule( new TilePRCIDomain[TileType](clockSinkParams, crossingParams) { self => val element = self.element_reset_domain { LazyModule(tileParams.instantiate(crossingParams, PriorityMuxHartIdFromSeq(allTileParams))) } }, instantiatedTiles(sourceTileId).asInstanceOf[TilePRCIDomain[TileType]] ) tile_prci_domain } } File ClockGroup.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.prci import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.lazymodule._ import org.chipsalliance.diplomacy.nodes._ import freechips.rocketchip.resources.FixedClockResource case class ClockGroupingNode(groupName: String)(implicit valName: ValName) extends MixedNexusNode(ClockGroupImp, ClockImp)( dFn = { _ => ClockSourceParameters() }, uFn = { seq => ClockGroupSinkParameters(name = groupName, members = seq) }) { override def circuitIdentity = outputs.size == 1 } class ClockGroup(groupName: String)(implicit p: Parameters) extends LazyModule { val node = ClockGroupingNode(groupName) lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { val (in, _) = node.in(0) val (out, _) = node.out.unzip require (node.in.size == 1) require (in.member.size == out.size) (in.member.data zip out) foreach { case (i, o) => o := i } } } object ClockGroup { def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new ClockGroup(valName.name)).node } case class ClockGroupAggregateNode(groupName: String)(implicit valName: ValName) extends NexusNode(ClockGroupImp)( dFn = { _ => ClockGroupSourceParameters() }, uFn = { seq => ClockGroupSinkParameters(name = groupName, members = seq.flatMap(_.members))}) { override def circuitIdentity = outputs.size == 1 } class ClockGroupAggregator(groupName: String)(implicit p: Parameters) extends LazyModule { val node = ClockGroupAggregateNode(groupName) override lazy val desiredName = s"ClockGroupAggregator_$groupName" lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { val (in, _) = node.in.unzip val (out, _) = node.out.unzip val outputs = out.flatMap(_.member.data) require (node.in.size == 1, s"Aggregator for groupName: ${groupName} had ${node.in.size} inward edges instead of 1") require (in.head.member.size == outputs.size) in.head.member.data.zip(outputs).foreach { case (i, o) => o := i } } } object ClockGroupAggregator { def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new ClockGroupAggregator(valName.name)).node } class SimpleClockGroupSource(numSources: Int = 1)(implicit p: Parameters) extends LazyModule { val node = ClockGroupSourceNode(List.fill(numSources) { ClockGroupSourceParameters() }) lazy val module = new Impl class Impl extends LazyModuleImp(this) { val (out, _) = node.out.unzip out.map { out: ClockGroupBundle => out.member.data.foreach { o => o.clock := clock; o.reset := reset } } } } object SimpleClockGroupSource { def apply(num: Int = 1)(implicit p: Parameters, valName: ValName) = LazyModule(new SimpleClockGroupSource(num)).node } case class FixedClockBroadcastNode(fixedClockOpt: Option[ClockParameters])(implicit valName: ValName) extends NexusNode(ClockImp)( dFn = { seq => fixedClockOpt.map(_ => ClockSourceParameters(give = 
fixedClockOpt)).orElse(seq.headOption).getOrElse(ClockSourceParameters()) }, uFn = { seq => fixedClockOpt.map(_ => ClockSinkParameters(take = fixedClockOpt)).orElse(seq.headOption).getOrElse(ClockSinkParameters()) }, inputRequiresOutput = false) { def fixedClockResources(name: String, prefix: String = "soc/"): Seq[Option[FixedClockResource]] = Seq(fixedClockOpt.map(t => new FixedClockResource(name, t.freqMHz, prefix))) } class FixedClockBroadcast(fixedClockOpt: Option[ClockParameters])(implicit p: Parameters) extends LazyModule { val node = new FixedClockBroadcastNode(fixedClockOpt) { override def circuitIdentity = outputs.size == 1 } lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { val (in, _) = node.in(0) val (out, _) = node.out.unzip override def desiredName = s"FixedClockBroadcast_${out.size}" require (node.in.size == 1, "FixedClockBroadcast can only broadcast a single clock") out.foreach { _ := in } } } object FixedClockBroadcast { def apply(fixedClockOpt: Option[ClockParameters] = None)(implicit p: Parameters, valName: ValName) = LazyModule(new FixedClockBroadcast(fixedClockOpt)).node } case class PRCIClockGroupNode()(implicit valName: ValName) extends NexusNode(ClockGroupImp)( dFn = { _ => ClockGroupSourceParameters() }, uFn = { _ => ClockGroupSinkParameters("prci", Nil) }, outputRequiresInput = false) File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. 
val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. 
*/ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File MixedNode.scala: package org.chipsalliance.diplomacy.nodes import chisel3.{Data, DontCare, Wire} import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.{Field, Parameters} import org.chipsalliance.diplomacy.ValName import org.chipsalliance.diplomacy.sourceLine /** One side metadata of a [[Dangle]]. * * Describes one side of an edge going into or out of a [[BaseNode]]. * * @param serial * the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to. * @param index * the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to. */ case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] { import scala.math.Ordered.orderingToOrdered def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that)) } /** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]] * connects. * * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] , * [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]]. * * @param source * the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within * that [[BaseNode]]. * @param sink * sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that * [[BaseNode]]. * @param flipped * flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to * `danglesIn`. * @param dataOpt * actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module */ case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) { def data = dataOpt.get } /** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often * derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually * implement the protocol. */ case class Edges[EI, EO](in: Seq[EI], out: Seq[EO]) /** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. 
*/ case object MonitorsEnabled extends Field[Boolean](true) /** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented. * * For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but * [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the * [[LazyModule]]. */ case object RenderFlipped extends Field[Boolean](false) /** The sealed node class in the package, all nodes are derived from it. * * @param inner * Sink interface implementation. * @param outer * Source interface implementation. * @param valName * val name of this node. * @tparam DI * Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters * describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected * [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port * parameters. * @tparam UI * Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing * the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself. * @tparam EI * Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers * specified for a sink according to protocol. * @tparam BI * Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface. * It should extend [[chisel3.Data]], which represents the real hardware. * @tparam DO * Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself. * @tparam UO * Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing * the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]]. * Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters. * @tparam EO * Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers * specified for a source according to protocol. * @tparam BO * Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source * interface. It should extend [[chisel3.Data]], which represents the real hardware.
* * @note * Call Graph of [[MixedNode]] * - line `─`: source is process by a function and generate pass to others * - Arrow `→`: target of arrow is generated by source * * {{{ * (from the other node) * ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐ * ↓ │ * (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │ * [[InwardNode.accPI]] │ │ │ * │ │ (based on protocol) │ * │ │ [[MixedNode.inner.edgeI]] │ * │ │ ↓ │ * ↓ │ │ │ * (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │ * [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │ * │ │ ↑ │ │ │ * │ │ │ [[OutwardNode.doParams]] │ │ * │ │ │ (from the other node) │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * │ │ │ └────────┬──────────────┤ │ * │ │ │ │ │ │ * │ │ │ │ (based on protocol) │ * │ │ │ │ [[MixedNode.inner.edgeI]] │ * │ │ │ │ │ │ * │ │ (from the other node) │ ↓ │ * │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │ * │ ↑ ↑ │ │ ↓ │ * │ │ │ │ │ [[MixedNode.in]] │ * │ │ │ │ ↓ ↑ │ * │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │ * ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │ * │ │ │ [[MixedNode.bundleOut]]─┐ │ │ * │ │ │ ↑ ↓ │ │ * │ │ │ │ [[MixedNode.out]] │ │ * │ ↓ ↓ │ ↑ │ │ * │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │ * │ │ (from the other node) ↑ │ │ * │ │ │ │ │ │ * │ │ │ [[MixedNode.outer.edgeO]] │ │ * │ │ │ (based on protocol) │ │ * │ │ │ │ │ │ * │ │ │ ┌────────────────────────────────────────┤ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * (immobilize after elaboration)│ ↓ │ │ │ │ * [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │ * ↑ (inward port from [[OutwardNode]]) │ │ │ │ * │ ┌─────────────────────────────────────────┤ │ │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * [[OutwardNode.accPO]] │ ↓ │ │ │ * (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │ * │ ↑ │ │ * │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │ * └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘ * }}} */ abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data]( val inner: InwardNodeImp[DI, UI, EI, BI], val outer: OutwardNodeImp[DO, UO, EO, BO] )( implicit valName: ValName) extends BaseNode with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO] with InwardNode[DI, UI, BI] with OutwardNode[DO, UO, BO] { // Generate a [[NodeHandle]] with inward and outward node are both this node. val inward = this val outward = this /** Debug info of nodes binding. */ def bindingInfo: String = s"""$iBindingInfo |$oBindingInfo |""".stripMargin /** Debug info of ports connecting. */ def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}] |${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}] |""".stripMargin /** Debug info of parameters propagations. 
*/ def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}] |${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}] |${diParams.size} downstream inward parameters: [${diParams.mkString(",")}] |${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}] |""".stripMargin /** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and * [[MixedNode.iPortMapping]]. * * Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward * stars and outward stars. * * This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type * of node. * * @param iKnown * Number of known-size ([[BIND_ONCE]]) input bindings. * @param oKnown * Number of known-size ([[BIND_ONCE]]) output bindings. * @param iStar * Number of unknown size ([[BIND_STAR]]) input bindings. * @param oStar * Number of unknown size ([[BIND_STAR]]) output bindings. * @return * A Tuple of the resolved number of input and output connections. */ protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) /** Function to generate downward-flowing outward params from the downward-flowing input params and the current output * ports. * * @param n * The size of the output sequence to generate. * @param p * Sequence of downward-flowing input parameters of this node. * @return * A `n`-sized sequence of downward-flowing output edge parameters. */ protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]]. * * @param n * Size of the output sequence. * @param p * Upward-flowing output edge parameters. * @return * A n-sized sequence of upward-flowing input edge parameters. */ protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] /** @return * The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with * [[BIND_STAR]]. */ protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR) /** @return * The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of * output bindings bound with [[BIND_STAR]]. */ protected[diplomacy] lazy val sourceCard: Int = iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR) /** @return list of nodes involved in flex bindings with this node. */ protected[diplomacy] lazy val flexes: Seq[BaseNode] = oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2) /** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin * greedily taking up the remaining connections. * * @return * A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return * value is not relevant. */ protected[diplomacy] lazy val flexOffset: Int = { /** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex * operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a * connection that is not a flex and can set the direction for us. 
Otherwise, recurse by visiting the `flexes` of * each node in the current set and decide whether they should be added to the set or not. * * @return * the mapping of [[BaseNode]] indexed by their serial numbers. */ def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = { if (visited.contains(v.serial) || !v.flexibleArityDirection) { visited } else { v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum)) } } /** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node. * * @example * {{{ * a :*=* b :*=* c * d :*=* b * e :*=* f * }}} * * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)` */ val flexSet = DFS(this, Map()).values /** The total number of :*= operators where we're on the left. */ val allSink = flexSet.map(_.sinkCard).sum /** The total number of :=* operators used when we're on the right. */ val allSource = flexSet.map(_.sourceCard).sum require( allSink == 0 || allSource == 0, s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction." ) allSink - allSource } /** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */ protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = { if (flexibleArityDirection) flexOffset else if (n.flexibleArityDirection) n.flexOffset else 0 } /** For a node which is connected between two nodes, select the one that will influence the direction of the flex * resolution. */ protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = { val dir = edgeArityDirection(n) if (dir < 0) l else if (dir > 0) r else 1 } /** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */ private var starCycleGuard = false /** Resolve all the star operators into concrete indicies. As connections are being made, some may be "star" * connections which need to be resolved. In some way to determine how many actual edges they correspond to. We also * need to build up the ranges of edges which correspond to each binding operator, so that We can apply the correct * edge parameters and later build up correct bundle connections. * * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding * operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort * (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= * bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N` */ protected[diplomacy] lazy val ( oPortMapping: Seq[(Int, Int)], iPortMapping: Seq[(Int, Int)], oStar: Int, iStar: Int ) = { try { if (starCycleGuard) throw StarCycleException() starCycleGuard = true // For a given node N... 
// Number of foo :=* N // + Number of bar :=* foo :*=* N val oStars = oBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0) } // Number of N :*= foo // + Number of N :*=* foo :*= bar val iStars = iBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0) } // 1 for foo := N // + bar.iStar for bar :*= foo :*=* N // + foo.iStar for foo :*= N // + 0 for foo :=* N val oKnown = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, 0, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => 0 } }.sum // 1 for N := foo // + bar.oStar for N :*=* foo :=* bar // + foo.oStar for N :=* foo // + 0 for N :*= foo val iKnown = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, 0) case BIND_QUERY => n.oStar case BIND_STAR => 0 } }.sum // Resolve star depends on the node subclass to implement the algorithm for this. val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars) // Cumulative list of resolved outward binding range starting points val oSum = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => oStar } }.scanLeft(0)(_ + _) // Cumulative list of resolved inward binding range starting points val iSum = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar) case BIND_QUERY => n.oStar case BIND_STAR => iStar } }.scanLeft(0)(_ + _) // Create ranges for each binding based on the running sums and return // those along with resolved values for the star operations. (oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar) } catch { case c: StarCycleException => throw c.copy(loop = context +: c.loop) } } /** Sequence of inward ports. * * This should be called after all star bindings are resolved. * * Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. * `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this * connection was made in the source code. */ protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oBindings.flatMap { case (i, n, _, p, s) => // for each binding operator in this node, look at what it connects to val (start, end) = n.iPortMapping(i) (start until end).map { j => (j, n, p, s) } } /** Sequence of outward ports. * * This should be called after all star bindings are resolved. * * `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of * outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection * was made in the source code. */ protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iBindings.flatMap { case (i, n, _, p, s) => // query this port index range of this node in the other side of node. 
val (start, end) = n.oPortMapping(i) (start until end).map { j => (j, n, p, s) } } // Ephemeral nodes ( which have non-None iForward/oForward) have in_degree = out_degree // Thus, there must exist an Eulerian path and the below algorithms terminate @scala.annotation.tailrec private def oTrace( tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) ): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.iForward(i) match { case None => (i, n, p, s) case Some((j, m)) => oTrace((j, m, p, s)) } } @scala.annotation.tailrec private def iTrace( tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) ): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.oForward(i) match { case None => (i, n, p, s) case Some((j, m)) => iTrace((j, m, p, s)) } } /** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - Numeric index of this binding in the [[InwardNode]] on the other end. * - [[InwardNode]] on the other end of this binding. * - A view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace) /** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - numeric index of this binding in [[OutwardNode]] on the other end. * - [[OutwardNode]] on the other end of this binding. * - a view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace) private var oParamsCycleGuard = false protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) } protected[diplomacy] lazy val doParams: Seq[DO] = { try { if (oParamsCycleGuard) throw DownwardCycleException() oParamsCycleGuard = true val o = mapParamsD(oPorts.size, diParams) require( o.size == oPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of outward ports should equal the number of produced outward parameters. |$context |$connectedPortsInfo |Downstreamed inward parameters: [${diParams.mkString(",")}] |Produced outward parameters: [${o.mkString(",")}] |""".stripMargin ) o.map(outer.mixO(_, this)) } catch { case c: DownwardCycleException => throw c.copy(loop = context +: c.loop) } } private var iParamsCycleGuard = false protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) } protected[diplomacy] lazy val uiParams: Seq[UI] = { try { if (iParamsCycleGuard) throw UpwardCycleException() iParamsCycleGuard = true val i = mapParamsU(iPorts.size, uoParams) require( i.size == iPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of inward ports should equal the number of produced inward parameters. |$context |$connectedPortsInfo |Upstreamed outward parameters: [${uoParams.mkString(",")}] |Produced inward parameters: [${i.mkString(",")}] |""".stripMargin ) i.map(inner.mixI(_, this)) } catch { case c: UpwardCycleException => throw c.copy(loop = context +: c.loop) } } /** Outward edge parameters. 
*/ protected[diplomacy] lazy val edgesOut: Seq[EO] = (oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) } /** Inward edge parameters. */ protected[diplomacy] lazy val edgesIn: Seq[EI] = (iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) } /** A tuple of the input edge parameters and output edge parameters for the edges bound to this node. * * If you need to access to the edges of a foreign Node, use this method (in/out create bundles). */ lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut) /** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */ protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e => val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } /** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */ protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e => val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(serial, i), sink = HalfEdge(n.serial, j), flipped = false, name = wirePrefix + "out", dataOpt = None ) } private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(n.serial, j), sink = HalfEdge(serial, i), flipped = true, name = wirePrefix + "in", dataOpt = None ) } /** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */ protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleOut(i))) } /** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */ protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleIn(i))) } private[diplomacy] var instantiated = false /** Gather Bundle and edge parameters of outward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def out: Seq[(BO, EO)] = { require( instantiated, s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleOut.zip(edgesOut) } /** Gather Bundle and edge parameters of inward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def in: Seq[(BI, EI)] = { require( instantiated, s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleIn.zip(edgesIn) } /** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires, * instantiate monitors on all input ports if appropriate, and return all the dangles of this node. 
*/ protected[diplomacy] def instantiate(): Seq[Dangle] = { instantiated = true if (!circuitIdentity) { (iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) } } danglesOut ++ danglesIn } protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn /** Connects the outward part of a node with the inward part of this node. */ protected[diplomacy] def bind( h: OutwardNode[DI, UI, BI], binding: NodeBinding )( implicit p: Parameters, sourceInfo: SourceInfo ): Unit = { val x = this // x := y val y = h sourceLine(sourceInfo, " at ", "") val i = x.iPushed val o = y.oPushed y.oPush( i, x, binding match { case BIND_ONCE => BIND_ONCE case BIND_FLEX => BIND_FLEX case BIND_STAR => BIND_QUERY case BIND_QUERY => BIND_STAR } ) x.iPush(o, y, binding) } /* Metadata for printing the node graph. */ def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) => val re = inner.render(e) (n, re.copy(flipped = re.flipped != p(RenderFlipped))) } /** Metadata for printing the node graph */ def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) } }
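As a side note on the star-resolution code above: every binding is reduced to a per-binding port count, and those counts are turned into contiguous index ranges by a running sum (scanLeft) whose adjacent pairs become (start, end) intervals. The following is a minimal, self-contained sketch of that arithmetic only; the object and names (PortRangeSketch, portRanges) are illustrative and are not part of the files listed above.

object PortRangeSketch {
  // Turn per-binding port counts into contiguous (start, end) ranges,
  // mirroring the oSum/iSum pattern: scanLeft followed by init-zip-tail.
  def portRanges(portCounts: Seq[Int]): Seq[(Int, Int)] = {
    val sums = portCounts.scanLeft(0)(_ + _) // running starting points, e.g. 0, 1, 3, 6
    sums.init.zip(sums.tail)                 // ranges, e.g. (0,1), (1,3), (3,6)
  }

  def main(args: Array[String]): Unit = {
    // Three bindings contributing 1, 2 and 3 resolved ports respectively
    assert(portRanges(Seq(1, 2, 3)) == Seq((0, 1), (1, 3), (3, 6)))
  }
}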
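Likewise, oTrace/iTrace simply follow one forwarding pointer per hop until a port with no forward remains; the source comments note that ephemeral-node forwarding is structured so these tail recursions terminate. A toy sketch of that tracing shape, using assumed types (an Int port index and a Map of forwards, neither of which appears in the real code):

object ForwardTraceSketch {
  // forwards(i) == Some(j) means port i is ephemeral and is forwarded to port j.
  @scala.annotation.tailrec
  def trace(i: Int, forwards: Map[Int, Int]): Int =
    forwards.get(i) match {
      case None    => i                  // reached a real (non-forwarded) port
      case Some(j) => trace(j, forwards) // keep following the forwarding chain
    }

  def main(args: Array[String]): Unit = {
    // 0 forwards to 2, 2 forwards to 5, so tracing port 0 ends at port 5
    assert(trace(0, Map(0 -> 2, 2 -> 5)) == 5)
  }
}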
module TilePRCIDomain( // @[ClockDomain.scala:14:9] output auto_intsink_out_1_0, // @[LazyModuleImp.scala:107:25] input auto_intsink_in_sync_0, // @[LazyModuleImp.scala:107:25] output auto_element_reset_domain_rockettile_trace_source_out_insns_0_valid, // @[LazyModuleImp.scala:107:25] output [39:0] auto_element_reset_domain_rockettile_trace_source_out_insns_0_iaddr, // @[LazyModuleImp.scala:107:25] output [31:0] auto_element_reset_domain_rockettile_trace_source_out_insns_0_insn, // @[LazyModuleImp.scala:107:25] output [2:0] auto_element_reset_domain_rockettile_trace_source_out_insns_0_priv, // @[LazyModuleImp.scala:107:25] output auto_element_reset_domain_rockettile_trace_source_out_insns_0_exception, // @[LazyModuleImp.scala:107:25] output auto_element_reset_domain_rockettile_trace_source_out_insns_0_interrupt, // @[LazyModuleImp.scala:107:25] output [63:0] auto_element_reset_domain_rockettile_trace_source_out_insns_0_cause, // @[LazyModuleImp.scala:107:25] output [39:0] auto_element_reset_domain_rockettile_trace_source_out_insns_0_tval, // @[LazyModuleImp.scala:107:25] output [63:0] auto_element_reset_domain_rockettile_trace_source_out_time, // @[LazyModuleImp.scala:107:25] input auto_element_reset_domain_rockettile_hartid_in, // @[LazyModuleImp.scala:107:25] input auto_int_in_clock_xing_in_2_sync_0, // @[LazyModuleImp.scala:107:25] input auto_int_in_clock_xing_in_1_sync_0, // @[LazyModuleImp.scala:107:25] input auto_int_in_clock_xing_in_0_sync_0, // @[LazyModuleImp.scala:107:25] input auto_int_in_clock_xing_in_0_sync_1, // @[LazyModuleImp.scala:107:25] input auto_tl_master_clock_xing_out_a_ready, // @[LazyModuleImp.scala:107:25] output auto_tl_master_clock_xing_out_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_tl_master_clock_xing_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_tl_master_clock_xing_out_a_bits_param, // @[LazyModuleImp.scala:107:25] output [3:0] auto_tl_master_clock_xing_out_a_bits_size, // @[LazyModuleImp.scala:107:25] output [2:0] auto_tl_master_clock_xing_out_a_bits_source, // @[LazyModuleImp.scala:107:25] output [31:0] auto_tl_master_clock_xing_out_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_tl_master_clock_xing_out_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_tl_master_clock_xing_out_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_tl_master_clock_xing_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_tl_master_clock_xing_out_b_ready, // @[LazyModuleImp.scala:107:25] input auto_tl_master_clock_xing_out_b_valid, // @[LazyModuleImp.scala:107:25] input [1:0] auto_tl_master_clock_xing_out_b_bits_param, // @[LazyModuleImp.scala:107:25] input [31:0] auto_tl_master_clock_xing_out_b_bits_address, // @[LazyModuleImp.scala:107:25] input auto_tl_master_clock_xing_out_c_ready, // @[LazyModuleImp.scala:107:25] output auto_tl_master_clock_xing_out_c_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_tl_master_clock_xing_out_c_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_tl_master_clock_xing_out_c_bits_param, // @[LazyModuleImp.scala:107:25] output [3:0] auto_tl_master_clock_xing_out_c_bits_size, // @[LazyModuleImp.scala:107:25] output [2:0] auto_tl_master_clock_xing_out_c_bits_source, // @[LazyModuleImp.scala:107:25] output [31:0] auto_tl_master_clock_xing_out_c_bits_address, // @[LazyModuleImp.scala:107:25] output [63:0] auto_tl_master_clock_xing_out_c_bits_data, // @[LazyModuleImp.scala:107:25] output auto_tl_master_clock_xing_out_c_bits_corrupt, // 
@[LazyModuleImp.scala:107:25] output auto_tl_master_clock_xing_out_d_ready, // @[LazyModuleImp.scala:107:25] input auto_tl_master_clock_xing_out_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_tl_master_clock_xing_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [1:0] auto_tl_master_clock_xing_out_d_bits_param, // @[LazyModuleImp.scala:107:25] input [3:0] auto_tl_master_clock_xing_out_d_bits_size, // @[LazyModuleImp.scala:107:25] input [2:0] auto_tl_master_clock_xing_out_d_bits_source, // @[LazyModuleImp.scala:107:25] input [2:0] auto_tl_master_clock_xing_out_d_bits_sink, // @[LazyModuleImp.scala:107:25] input auto_tl_master_clock_xing_out_d_bits_denied, // @[LazyModuleImp.scala:107:25] input [63:0] auto_tl_master_clock_xing_out_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_tl_master_clock_xing_out_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_tl_master_clock_xing_out_e_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_tl_master_clock_xing_out_e_bits_sink, // @[LazyModuleImp.scala:107:25] input auto_tap_clock_in_clock, // @[LazyModuleImp.scala:107:25] input auto_tap_clock_in_reset // @[LazyModuleImp.scala:107:25] ); wire clockNode_auto_anon_in_reset; // @[ClockGroup.scala:104:9] wire clockNode_auto_anon_in_clock; // @[ClockGroup.scala:104:9] wire element_reset_domain_auto_clock_in_reset; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_clock_in_clock; // @[ClockDomain.scala:14:9] wire auto_intsink_in_sync_0_0 = auto_intsink_in_sync_0; // @[ClockDomain.scala:14:9] wire auto_element_reset_domain_rockettile_hartid_in_0 = auto_element_reset_domain_rockettile_hartid_in; // @[ClockDomain.scala:14:9] wire auto_int_in_clock_xing_in_2_sync_0_0 = auto_int_in_clock_xing_in_2_sync_0; // @[ClockDomain.scala:14:9] wire auto_int_in_clock_xing_in_1_sync_0_0 = auto_int_in_clock_xing_in_1_sync_0; // @[ClockDomain.scala:14:9] wire auto_int_in_clock_xing_in_0_sync_0_0 = auto_int_in_clock_xing_in_0_sync_0; // @[ClockDomain.scala:14:9] wire auto_int_in_clock_xing_in_0_sync_1_0 = auto_int_in_clock_xing_in_0_sync_1; // @[ClockDomain.scala:14:9] wire auto_tl_master_clock_xing_out_a_ready_0 = auto_tl_master_clock_xing_out_a_ready; // @[ClockDomain.scala:14:9] wire auto_tl_master_clock_xing_out_b_valid_0 = auto_tl_master_clock_xing_out_b_valid; // @[ClockDomain.scala:14:9] wire [1:0] auto_tl_master_clock_xing_out_b_bits_param_0 = auto_tl_master_clock_xing_out_b_bits_param; // @[ClockDomain.scala:14:9] wire [31:0] auto_tl_master_clock_xing_out_b_bits_address_0 = auto_tl_master_clock_xing_out_b_bits_address; // @[ClockDomain.scala:14:9] wire auto_tl_master_clock_xing_out_c_ready_0 = auto_tl_master_clock_xing_out_c_ready; // @[ClockDomain.scala:14:9] wire auto_tl_master_clock_xing_out_d_valid_0 = auto_tl_master_clock_xing_out_d_valid; // @[ClockDomain.scala:14:9] wire [2:0] auto_tl_master_clock_xing_out_d_bits_opcode_0 = auto_tl_master_clock_xing_out_d_bits_opcode; // @[ClockDomain.scala:14:9] wire [1:0] auto_tl_master_clock_xing_out_d_bits_param_0 = auto_tl_master_clock_xing_out_d_bits_param; // @[ClockDomain.scala:14:9] wire [3:0] auto_tl_master_clock_xing_out_d_bits_size_0 = auto_tl_master_clock_xing_out_d_bits_size; // @[ClockDomain.scala:14:9] wire [2:0] auto_tl_master_clock_xing_out_d_bits_source_0 = auto_tl_master_clock_xing_out_d_bits_source; // @[ClockDomain.scala:14:9] wire [2:0] auto_tl_master_clock_xing_out_d_bits_sink_0 = auto_tl_master_clock_xing_out_d_bits_sink; // @[ClockDomain.scala:14:9] wire auto_tl_master_clock_xing_out_d_bits_denied_0 
= auto_tl_master_clock_xing_out_d_bits_denied; // @[ClockDomain.scala:14:9] wire [63:0] auto_tl_master_clock_xing_out_d_bits_data_0 = auto_tl_master_clock_xing_out_d_bits_data; // @[ClockDomain.scala:14:9] wire auto_tl_master_clock_xing_out_d_bits_corrupt_0 = auto_tl_master_clock_xing_out_d_bits_corrupt; // @[ClockDomain.scala:14:9] wire auto_tap_clock_in_clock_0 = auto_tap_clock_in_clock; // @[ClockDomain.scala:14:9] wire auto_tap_clock_in_reset_0 = auto_tap_clock_in_reset; // @[ClockDomain.scala:14:9] wire [31:0] auto_element_reset_domain_rockettile_trace_core_source_out_group_0_iaddr = 32'h0; // @[ClockDomain.scala:14:9] wire [31:0] auto_element_reset_domain_rockettile_trace_core_source_out_tval = 32'h0; // @[ClockDomain.scala:14:9] wire [31:0] auto_element_reset_domain_rockettile_trace_core_source_out_cause = 32'h0; // @[ClockDomain.scala:14:9] wire [31:0] element_reset_domain_auto_rockettile_trace_core_source_out_group_0_iaddr = 32'h0; // @[ClockDomain.scala:14:9] wire [31:0] element_reset_domain_auto_rockettile_trace_core_source_out_tval = 32'h0; // @[ClockDomain.scala:14:9] wire [31:0] element_reset_domain_auto_rockettile_trace_core_source_out_cause = 32'h0; // @[ClockDomain.scala:14:9] wire [3:0] auto_element_reset_domain_rockettile_trace_core_source_out_group_0_itype = 4'h0; // @[ClockDomain.scala:14:9] wire [3:0] auto_element_reset_domain_rockettile_trace_core_source_out_priv = 4'h0; // @[ClockDomain.scala:14:9] wire [3:0] element_reset_domain_auto_rockettile_trace_core_source_out_group_0_itype = 4'h0; // @[ClockDomain.scala:14:9] wire [3:0] element_reset_domain_auto_rockettile_trace_core_source_out_priv = 4'h0; // @[ClockDomain.scala:14:9] wire [31:0] auto_element_reset_domain_rockettile_reset_vector_in = 32'h10000; // @[ClockDomain.scala:14:9] wire [31:0] element_reset_domain_auto_rockettile_reset_vector_in = 32'h10000; // @[ClockDomain.scala:14:9] wire [2:0] auto_tl_master_clock_xing_out_b_bits_opcode = 3'h6; // @[ClockDomain.scala:14:9] wire [2:0] tlMasterClockXingOut_b_bits_opcode = 3'h6; // @[MixedNode.scala:542:17] wire [2:0] tlMasterClockXingIn_b_bits_opcode = 3'h6; // @[MixedNode.scala:551:17] wire [3:0] auto_tl_master_clock_xing_out_b_bits_size = 4'h6; // @[ClockDomain.scala:14:9] wire [3:0] tlMasterClockXingOut_b_bits_size = 4'h6; // @[MixedNode.scala:542:17] wire [3:0] tlMasterClockXingIn_b_bits_size = 4'h6; // @[MixedNode.scala:551:17] wire [2:0] auto_tl_master_clock_xing_out_b_bits_source = 3'h0; // @[ClockDomain.scala:14:9] wire [2:0] tlMasterClockXingOut_b_bits_source = 3'h0; // @[MixedNode.scala:542:17] wire [2:0] tlMasterClockXingIn_b_bits_source = 3'h0; // @[MixedNode.scala:551:17] wire [7:0] auto_tl_master_clock_xing_out_b_bits_mask = 8'hFF; // @[ClockDomain.scala:14:9] wire [7:0] tlMasterClockXingOut_b_bits_mask = 8'hFF; // @[MixedNode.scala:542:17] wire [7:0] tlMasterClockXingIn_b_bits_mask = 8'hFF; // @[MixedNode.scala:551:17] wire [63:0] auto_tl_master_clock_xing_out_b_bits_data = 64'h0; // @[ClockDomain.scala:14:9] wire [63:0] tlMasterClockXingOut_b_bits_data = 64'h0; // @[MixedNode.scala:542:17] wire [63:0] tlMasterClockXingIn_b_bits_data = 64'h0; // @[MixedNode.scala:551:17] wire auto_tl_master_clock_xing_out_e_ready = 1'h1; // @[ClockDomain.scala:14:9] wire tlMasterClockXingOut_e_ready = 1'h1; // @[MixedNode.scala:542:17] wire tlMasterClockXingIn_e_ready = 1'h1; // @[MixedNode.scala:551:17] wire auto_intsink_out_2_0 = 1'h0; // @[ClockDomain.scala:14:9] wire auto_intsink_out_0_0 = 1'h0; // @[ClockDomain.scala:14:9] wire 
auto_element_reset_domain_rockettile_trace_core_source_out_group_0_iretire = 1'h0; // @[ClockDomain.scala:14:9] wire auto_element_reset_domain_rockettile_trace_core_source_out_group_0_ilastsize = 1'h0; // @[ClockDomain.scala:14:9] wire auto_tl_master_clock_xing_out_b_bits_corrupt = 1'h0; // @[ClockDomain.scala:14:9] wire _childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25] wire element_reset_domain_auto_rockettile_buffer_out_a_bits_corrupt = 1'h0; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_buffer_out_c_bits_corrupt = 1'h0; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_cease_out_0 = 1'h0; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_halt_out_0 = 1'h0; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_trace_core_source_out_group_0_iretire = 1'h0; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_trace_core_source_out_group_0_ilastsize = 1'h0; // @[ClockDomain.scala:14:9] wire element_reset_domain__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25] wire clockNode_childClock = 1'h0; // @[LazyModuleImp.scala:155:31] wire clockNode_childReset = 1'h0; // @[LazyModuleImp.scala:158:31] wire clockNode__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25] wire tlMasterClockXingOut_b_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17] wire tlMasterClockXingIn_b_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17] wire intOutClockXingOut_sync_0 = 1'h0; // @[MixedNode.scala:542:17] wire intOutClockXingIn_sync_0 = 1'h0; // @[MixedNode.scala:551:17] wire intOutClockXingOut_1_sync_0 = 1'h0; // @[MixedNode.scala:542:17] wire intOutClockXingIn_1_sync_0 = 1'h0; // @[MixedNode.scala:551:17] wire intOutClockXingOut_4_sync_0 = 1'h0; // @[MixedNode.scala:542:17] wire intOutClockXingIn_4_sync_0 = 1'h0; // @[MixedNode.scala:551:17] wire intOutClockXingOut_5_sync_0 = 1'h0; // @[MixedNode.scala:542:17] wire intOutClockXingIn_5_sync_0 = 1'h0; // @[MixedNode.scala:551:17] wire element_reset_domain_auto_rockettile_trace_source_out_insns_0_valid; // @[ClockDomain.scala:14:9] wire [39:0] element_reset_domain_auto_rockettile_trace_source_out_insns_0_iaddr; // @[ClockDomain.scala:14:9] wire [31:0] element_reset_domain_auto_rockettile_trace_source_out_insns_0_insn; // @[ClockDomain.scala:14:9] wire [2:0] element_reset_domain_auto_rockettile_trace_source_out_insns_0_priv; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_trace_source_out_insns_0_exception; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_trace_source_out_insns_0_interrupt; // @[ClockDomain.scala:14:9] wire [63:0] element_reset_domain_auto_rockettile_trace_source_out_insns_0_cause; // @[ClockDomain.scala:14:9] wire [39:0] element_reset_domain_auto_rockettile_trace_source_out_insns_0_tval; // @[ClockDomain.scala:14:9] wire [63:0] element_reset_domain_auto_rockettile_trace_source_out_time; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_hartid_in = auto_element_reset_domain_rockettile_hartid_in_0; // @[ClockDomain.scala:14:9] wire intInClockXingIn_2_sync_0 = auto_int_in_clock_xing_in_2_sync_0_0; // @[ClockDomain.scala:14:9] wire intInClockXingIn_1_sync_0 = auto_int_in_clock_xing_in_1_sync_0_0; // @[ClockDomain.scala:14:9] wire intInClockXingIn_sync_0 = auto_int_in_clock_xing_in_0_sync_0_0; // @[ClockDomain.scala:14:9] wire intInClockXingIn_sync_1 = auto_int_in_clock_xing_in_0_sync_1_0; // @[ClockDomain.scala:14:9] wire tlMasterClockXingOut_a_ready = 
auto_tl_master_clock_xing_out_a_ready_0; // @[ClockDomain.scala:14:9] wire tlMasterClockXingOut_a_valid; // @[MixedNode.scala:542:17] wire [2:0] tlMasterClockXingOut_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] tlMasterClockXingOut_a_bits_param; // @[MixedNode.scala:542:17] wire [3:0] tlMasterClockXingOut_a_bits_size; // @[MixedNode.scala:542:17] wire [2:0] tlMasterClockXingOut_a_bits_source; // @[MixedNode.scala:542:17] wire [31:0] tlMasterClockXingOut_a_bits_address; // @[MixedNode.scala:542:17] wire [7:0] tlMasterClockXingOut_a_bits_mask; // @[MixedNode.scala:542:17] wire [63:0] tlMasterClockXingOut_a_bits_data; // @[MixedNode.scala:542:17] wire tlMasterClockXingOut_a_bits_corrupt; // @[MixedNode.scala:542:17] wire tlMasterClockXingOut_b_ready; // @[MixedNode.scala:542:17] wire tlMasterClockXingOut_b_valid = auto_tl_master_clock_xing_out_b_valid_0; // @[ClockDomain.scala:14:9] wire [1:0] tlMasterClockXingOut_b_bits_param = auto_tl_master_clock_xing_out_b_bits_param_0; // @[ClockDomain.scala:14:9] wire [31:0] tlMasterClockXingOut_b_bits_address = auto_tl_master_clock_xing_out_b_bits_address_0; // @[ClockDomain.scala:14:9] wire tlMasterClockXingOut_c_ready = auto_tl_master_clock_xing_out_c_ready_0; // @[ClockDomain.scala:14:9] wire tlMasterClockXingOut_c_valid; // @[MixedNode.scala:542:17] wire [2:0] tlMasterClockXingOut_c_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] tlMasterClockXingOut_c_bits_param; // @[MixedNode.scala:542:17] wire [3:0] tlMasterClockXingOut_c_bits_size; // @[MixedNode.scala:542:17] wire [2:0] tlMasterClockXingOut_c_bits_source; // @[MixedNode.scala:542:17] wire [31:0] tlMasterClockXingOut_c_bits_address; // @[MixedNode.scala:542:17] wire [63:0] tlMasterClockXingOut_c_bits_data; // @[MixedNode.scala:542:17] wire tlMasterClockXingOut_c_bits_corrupt; // @[MixedNode.scala:542:17] wire tlMasterClockXingOut_d_ready; // @[MixedNode.scala:542:17] wire tlMasterClockXingOut_d_valid = auto_tl_master_clock_xing_out_d_valid_0; // @[ClockDomain.scala:14:9] wire [2:0] tlMasterClockXingOut_d_bits_opcode = auto_tl_master_clock_xing_out_d_bits_opcode_0; // @[ClockDomain.scala:14:9] wire [1:0] tlMasterClockXingOut_d_bits_param = auto_tl_master_clock_xing_out_d_bits_param_0; // @[ClockDomain.scala:14:9] wire [3:0] tlMasterClockXingOut_d_bits_size = auto_tl_master_clock_xing_out_d_bits_size_0; // @[ClockDomain.scala:14:9] wire [2:0] tlMasterClockXingOut_d_bits_source = auto_tl_master_clock_xing_out_d_bits_source_0; // @[ClockDomain.scala:14:9] wire [2:0] tlMasterClockXingOut_d_bits_sink = auto_tl_master_clock_xing_out_d_bits_sink_0; // @[ClockDomain.scala:14:9] wire tlMasterClockXingOut_d_bits_denied = auto_tl_master_clock_xing_out_d_bits_denied_0; // @[ClockDomain.scala:14:9] wire [63:0] tlMasterClockXingOut_d_bits_data = auto_tl_master_clock_xing_out_d_bits_data_0; // @[ClockDomain.scala:14:9] wire tlMasterClockXingOut_d_bits_corrupt = auto_tl_master_clock_xing_out_d_bits_corrupt_0; // @[ClockDomain.scala:14:9] wire tlMasterClockXingOut_e_valid; // @[MixedNode.scala:542:17] wire [2:0] tlMasterClockXingOut_e_bits_sink; // @[MixedNode.scala:542:17] wire tapClockNodeIn_clock = auto_tap_clock_in_clock_0; // @[ClockDomain.scala:14:9] wire tapClockNodeIn_reset = auto_tap_clock_in_reset_0; // @[ClockDomain.scala:14:9] wire auto_intsink_out_1_0_0; // @[ClockDomain.scala:14:9] wire auto_element_reset_domain_rockettile_trace_source_out_insns_0_valid_0; // @[ClockDomain.scala:14:9] wire [39:0] auto_element_reset_domain_rockettile_trace_source_out_insns_0_iaddr_0; // 
@[ClockDomain.scala:14:9] wire [31:0] auto_element_reset_domain_rockettile_trace_source_out_insns_0_insn_0; // @[ClockDomain.scala:14:9] wire [2:0] auto_element_reset_domain_rockettile_trace_source_out_insns_0_priv_0; // @[ClockDomain.scala:14:9] wire auto_element_reset_domain_rockettile_trace_source_out_insns_0_exception_0; // @[ClockDomain.scala:14:9] wire auto_element_reset_domain_rockettile_trace_source_out_insns_0_interrupt_0; // @[ClockDomain.scala:14:9] wire [63:0] auto_element_reset_domain_rockettile_trace_source_out_insns_0_cause_0; // @[ClockDomain.scala:14:9] wire [39:0] auto_element_reset_domain_rockettile_trace_source_out_insns_0_tval_0; // @[ClockDomain.scala:14:9] wire [63:0] auto_element_reset_domain_rockettile_trace_source_out_time_0; // @[ClockDomain.scala:14:9] wire [2:0] auto_tl_master_clock_xing_out_a_bits_opcode_0; // @[ClockDomain.scala:14:9] wire [2:0] auto_tl_master_clock_xing_out_a_bits_param_0; // @[ClockDomain.scala:14:9] wire [3:0] auto_tl_master_clock_xing_out_a_bits_size_0; // @[ClockDomain.scala:14:9] wire [2:0] auto_tl_master_clock_xing_out_a_bits_source_0; // @[ClockDomain.scala:14:9] wire [31:0] auto_tl_master_clock_xing_out_a_bits_address_0; // @[ClockDomain.scala:14:9] wire [7:0] auto_tl_master_clock_xing_out_a_bits_mask_0; // @[ClockDomain.scala:14:9] wire [63:0] auto_tl_master_clock_xing_out_a_bits_data_0; // @[ClockDomain.scala:14:9] wire auto_tl_master_clock_xing_out_a_bits_corrupt_0; // @[ClockDomain.scala:14:9] wire auto_tl_master_clock_xing_out_a_valid_0; // @[ClockDomain.scala:14:9] wire auto_tl_master_clock_xing_out_b_ready_0; // @[ClockDomain.scala:14:9] wire [2:0] auto_tl_master_clock_xing_out_c_bits_opcode_0; // @[ClockDomain.scala:14:9] wire [2:0] auto_tl_master_clock_xing_out_c_bits_param_0; // @[ClockDomain.scala:14:9] wire [3:0] auto_tl_master_clock_xing_out_c_bits_size_0; // @[ClockDomain.scala:14:9] wire [2:0] auto_tl_master_clock_xing_out_c_bits_source_0; // @[ClockDomain.scala:14:9] wire [31:0] auto_tl_master_clock_xing_out_c_bits_address_0; // @[ClockDomain.scala:14:9] wire [63:0] auto_tl_master_clock_xing_out_c_bits_data_0; // @[ClockDomain.scala:14:9] wire auto_tl_master_clock_xing_out_c_bits_corrupt_0; // @[ClockDomain.scala:14:9] wire auto_tl_master_clock_xing_out_c_valid_0; // @[ClockDomain.scala:14:9] wire auto_tl_master_clock_xing_out_d_ready_0; // @[ClockDomain.scala:14:9] wire [2:0] auto_tl_master_clock_xing_out_e_bits_sink_0; // @[ClockDomain.scala:14:9] wire auto_tl_master_clock_xing_out_e_valid_0; // @[ClockDomain.scala:14:9] wire childClock; // @[LazyModuleImp.scala:155:31] wire childReset; // @[LazyModuleImp.scala:158:31] assign auto_element_reset_domain_rockettile_trace_source_out_insns_0_valid_0 = element_reset_domain_auto_rockettile_trace_source_out_insns_0_valid; // @[ClockDomain.scala:14:9] assign auto_element_reset_domain_rockettile_trace_source_out_insns_0_iaddr_0 = element_reset_domain_auto_rockettile_trace_source_out_insns_0_iaddr; // @[ClockDomain.scala:14:9] assign auto_element_reset_domain_rockettile_trace_source_out_insns_0_insn_0 = element_reset_domain_auto_rockettile_trace_source_out_insns_0_insn; // @[ClockDomain.scala:14:9] assign auto_element_reset_domain_rockettile_trace_source_out_insns_0_priv_0 = element_reset_domain_auto_rockettile_trace_source_out_insns_0_priv; // @[ClockDomain.scala:14:9] assign auto_element_reset_domain_rockettile_trace_source_out_insns_0_exception_0 = element_reset_domain_auto_rockettile_trace_source_out_insns_0_exception; // @[ClockDomain.scala:14:9] assign 
auto_element_reset_domain_rockettile_trace_source_out_insns_0_interrupt_0 = element_reset_domain_auto_rockettile_trace_source_out_insns_0_interrupt; // @[ClockDomain.scala:14:9] assign auto_element_reset_domain_rockettile_trace_source_out_insns_0_cause_0 = element_reset_domain_auto_rockettile_trace_source_out_insns_0_cause; // @[ClockDomain.scala:14:9] assign auto_element_reset_domain_rockettile_trace_source_out_insns_0_tval_0 = element_reset_domain_auto_rockettile_trace_source_out_insns_0_tval; // @[ClockDomain.scala:14:9] assign auto_element_reset_domain_rockettile_trace_source_out_time_0 = element_reset_domain_auto_rockettile_trace_source_out_time; // @[ClockDomain.scala:14:9] wire clockNode_auto_anon_out_clock; // @[ClockGroup.scala:104:9] wire element_reset_domain_clockNodeIn_clock = element_reset_domain_auto_clock_in_clock; // @[ClockDomain.scala:14:9] wire clockNode_auto_anon_out_reset; // @[ClockGroup.scala:104:9] wire [2:0] element_reset_domain_auto_rockettile_buffer_out_a_bits_opcode; // @[ClockDomain.scala:14:9] wire [2:0] element_reset_domain_auto_rockettile_buffer_out_a_bits_param; // @[ClockDomain.scala:14:9] wire [3:0] element_reset_domain_auto_rockettile_buffer_out_a_bits_size; // @[ClockDomain.scala:14:9] wire [2:0] element_reset_domain_auto_rockettile_buffer_out_a_bits_source; // @[ClockDomain.scala:14:9] wire [31:0] element_reset_domain_auto_rockettile_buffer_out_a_bits_address; // @[ClockDomain.scala:14:9] wire [7:0] element_reset_domain_auto_rockettile_buffer_out_a_bits_mask; // @[ClockDomain.scala:14:9] wire [63:0] element_reset_domain_auto_rockettile_buffer_out_a_bits_data; // @[ClockDomain.scala:14:9] wire element_reset_domain_clockNodeIn_reset = element_reset_domain_auto_clock_in_reset; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_buffer_out_a_ready; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_buffer_out_a_valid; // @[ClockDomain.scala:14:9] wire [2:0] element_reset_domain_auto_rockettile_buffer_out_b_bits_opcode; // @[ClockDomain.scala:14:9] wire [1:0] element_reset_domain_auto_rockettile_buffer_out_b_bits_param; // @[ClockDomain.scala:14:9] wire [3:0] element_reset_domain_auto_rockettile_buffer_out_b_bits_size; // @[ClockDomain.scala:14:9] wire [2:0] element_reset_domain_auto_rockettile_buffer_out_b_bits_source; // @[ClockDomain.scala:14:9] wire [31:0] element_reset_domain_auto_rockettile_buffer_out_b_bits_address; // @[ClockDomain.scala:14:9] wire [7:0] element_reset_domain_auto_rockettile_buffer_out_b_bits_mask; // @[ClockDomain.scala:14:9] wire [63:0] element_reset_domain_auto_rockettile_buffer_out_b_bits_data; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_buffer_out_b_bits_corrupt; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_buffer_out_b_ready; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_buffer_out_b_valid; // @[ClockDomain.scala:14:9] wire [2:0] element_reset_domain_auto_rockettile_buffer_out_c_bits_opcode; // @[ClockDomain.scala:14:9] wire [2:0] element_reset_domain_auto_rockettile_buffer_out_c_bits_param; // @[ClockDomain.scala:14:9] wire [3:0] element_reset_domain_auto_rockettile_buffer_out_c_bits_size; // @[ClockDomain.scala:14:9] wire [2:0] element_reset_domain_auto_rockettile_buffer_out_c_bits_source; // @[ClockDomain.scala:14:9] wire [31:0] element_reset_domain_auto_rockettile_buffer_out_c_bits_address; // @[ClockDomain.scala:14:9] wire [63:0] element_reset_domain_auto_rockettile_buffer_out_c_bits_data; // 
@[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_buffer_out_c_ready; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_buffer_out_c_valid; // @[ClockDomain.scala:14:9] wire [2:0] element_reset_domain_auto_rockettile_buffer_out_d_bits_opcode; // @[ClockDomain.scala:14:9] wire [1:0] element_reset_domain_auto_rockettile_buffer_out_d_bits_param; // @[ClockDomain.scala:14:9] wire [3:0] element_reset_domain_auto_rockettile_buffer_out_d_bits_size; // @[ClockDomain.scala:14:9] wire [2:0] element_reset_domain_auto_rockettile_buffer_out_d_bits_source; // @[ClockDomain.scala:14:9] wire [2:0] element_reset_domain_auto_rockettile_buffer_out_d_bits_sink; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_buffer_out_d_bits_denied; // @[ClockDomain.scala:14:9] wire [63:0] element_reset_domain_auto_rockettile_buffer_out_d_bits_data; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_buffer_out_d_bits_corrupt; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_buffer_out_d_ready; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_buffer_out_d_valid; // @[ClockDomain.scala:14:9] wire [2:0] element_reset_domain_auto_rockettile_buffer_out_e_bits_sink; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_buffer_out_e_ready; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_buffer_out_e_valid; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_wfi_out_0; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_int_local_in_3_0; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_int_local_in_2_0; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_int_local_in_1_0; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_int_local_in_1_1; // @[ClockDomain.scala:14:9] wire element_reset_domain_auto_rockettile_int_local_in_0_0; // @[ClockDomain.scala:14:9] wire element_reset_domain_childClock; // @[LazyModuleImp.scala:155:31] wire element_reset_domain_childReset; // @[LazyModuleImp.scala:158:31] assign element_reset_domain_childClock = element_reset_domain_clockNodeIn_clock; // @[MixedNode.scala:551:17] assign element_reset_domain_childReset = element_reset_domain_clockNodeIn_reset; // @[MixedNode.scala:551:17] wire tapClockNodeOut_clock; // @[MixedNode.scala:542:17] wire clockNode_anonIn_clock = clockNode_auto_anon_in_clock; // @[ClockGroup.scala:104:9] wire tapClockNodeOut_reset; // @[MixedNode.scala:542:17] wire clockNode_anonOut_clock; // @[MixedNode.scala:542:17] wire clockNode_anonIn_reset = clockNode_auto_anon_in_reset; // @[ClockGroup.scala:104:9] assign element_reset_domain_auto_clock_in_clock = clockNode_auto_anon_out_clock; // @[ClockGroup.scala:104:9] wire clockNode_anonOut_reset; // @[MixedNode.scala:542:17] assign element_reset_domain_auto_clock_in_reset = clockNode_auto_anon_out_reset; // @[ClockGroup.scala:104:9] assign clockNode_auto_anon_out_clock = clockNode_anonOut_clock; // @[ClockGroup.scala:104:9] assign clockNode_auto_anon_out_reset = clockNode_anonOut_reset; // @[ClockGroup.scala:104:9] assign clockNode_anonOut_clock = clockNode_anonIn_clock; // @[MixedNode.scala:542:17, :551:17] assign clockNode_anonOut_reset = clockNode_anonIn_reset; // @[MixedNode.scala:542:17, :551:17] assign clockNode_auto_anon_in_clock = tapClockNodeOut_clock; // @[ClockGroup.scala:104:9] assign clockNode_auto_anon_in_reset = tapClockNodeOut_reset; // @[ClockGroup.scala:104:9] 
assign childClock = tapClockNodeIn_clock; // @[MixedNode.scala:551:17] assign tapClockNodeOut_clock = tapClockNodeIn_clock; // @[MixedNode.scala:542:17, :551:17] assign childReset = tapClockNodeIn_reset; // @[MixedNode.scala:551:17] assign tapClockNodeOut_reset = tapClockNodeIn_reset; // @[MixedNode.scala:542:17, :551:17] wire tlMasterClockXingIn_a_ready = tlMasterClockXingOut_a_ready; // @[MixedNode.scala:542:17, :551:17] wire tlMasterClockXingIn_a_valid; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_a_valid_0 = tlMasterClockXingOut_a_valid; // @[ClockDomain.scala:14:9] wire [2:0] tlMasterClockXingIn_a_bits_opcode; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_a_bits_opcode_0 = tlMasterClockXingOut_a_bits_opcode; // @[ClockDomain.scala:14:9] wire [2:0] tlMasterClockXingIn_a_bits_param; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_a_bits_param_0 = tlMasterClockXingOut_a_bits_param; // @[ClockDomain.scala:14:9] wire [3:0] tlMasterClockXingIn_a_bits_size; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_a_bits_size_0 = tlMasterClockXingOut_a_bits_size; // @[ClockDomain.scala:14:9] wire [2:0] tlMasterClockXingIn_a_bits_source; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_a_bits_source_0 = tlMasterClockXingOut_a_bits_source; // @[ClockDomain.scala:14:9] wire [31:0] tlMasterClockXingIn_a_bits_address; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_a_bits_address_0 = tlMasterClockXingOut_a_bits_address; // @[ClockDomain.scala:14:9] wire [7:0] tlMasterClockXingIn_a_bits_mask; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_a_bits_mask_0 = tlMasterClockXingOut_a_bits_mask; // @[ClockDomain.scala:14:9] wire [63:0] tlMasterClockXingIn_a_bits_data; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_a_bits_data_0 = tlMasterClockXingOut_a_bits_data; // @[ClockDomain.scala:14:9] wire tlMasterClockXingIn_a_bits_corrupt; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_a_bits_corrupt_0 = tlMasterClockXingOut_a_bits_corrupt; // @[ClockDomain.scala:14:9] wire tlMasterClockXingIn_b_ready; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_b_ready_0 = tlMasterClockXingOut_b_ready; // @[ClockDomain.scala:14:9] wire tlMasterClockXingIn_b_valid = tlMasterClockXingOut_b_valid; // @[MixedNode.scala:542:17, :551:17] wire [1:0] tlMasterClockXingIn_b_bits_param = tlMasterClockXingOut_b_bits_param; // @[MixedNode.scala:542:17, :551:17] wire [31:0] tlMasterClockXingIn_b_bits_address = tlMasterClockXingOut_b_bits_address; // @[MixedNode.scala:542:17, :551:17] wire tlMasterClockXingIn_c_ready = tlMasterClockXingOut_c_ready; // @[MixedNode.scala:542:17, :551:17] wire tlMasterClockXingIn_c_valid; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_c_valid_0 = tlMasterClockXingOut_c_valid; // @[ClockDomain.scala:14:9] wire [2:0] tlMasterClockXingIn_c_bits_opcode; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_c_bits_opcode_0 = tlMasterClockXingOut_c_bits_opcode; // @[ClockDomain.scala:14:9] wire [2:0] tlMasterClockXingIn_c_bits_param; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_c_bits_param_0 = tlMasterClockXingOut_c_bits_param; // @[ClockDomain.scala:14:9] wire [3:0] tlMasterClockXingIn_c_bits_size; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_c_bits_size_0 = tlMasterClockXingOut_c_bits_size; // @[ClockDomain.scala:14:9] wire [2:0] tlMasterClockXingIn_c_bits_source; // 
@[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_c_bits_source_0 = tlMasterClockXingOut_c_bits_source; // @[ClockDomain.scala:14:9] wire [31:0] tlMasterClockXingIn_c_bits_address; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_c_bits_address_0 = tlMasterClockXingOut_c_bits_address; // @[ClockDomain.scala:14:9] wire [63:0] tlMasterClockXingIn_c_bits_data; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_c_bits_data_0 = tlMasterClockXingOut_c_bits_data; // @[ClockDomain.scala:14:9] wire tlMasterClockXingIn_c_bits_corrupt; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_c_bits_corrupt_0 = tlMasterClockXingOut_c_bits_corrupt; // @[ClockDomain.scala:14:9] wire tlMasterClockXingIn_d_ready; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_d_ready_0 = tlMasterClockXingOut_d_ready; // @[ClockDomain.scala:14:9] wire tlMasterClockXingIn_d_valid = tlMasterClockXingOut_d_valid; // @[MixedNode.scala:542:17, :551:17] wire [2:0] tlMasterClockXingIn_d_bits_opcode = tlMasterClockXingOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17] wire [1:0] tlMasterClockXingIn_d_bits_param = tlMasterClockXingOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17] wire [3:0] tlMasterClockXingIn_d_bits_size = tlMasterClockXingOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17] wire [2:0] tlMasterClockXingIn_d_bits_source = tlMasterClockXingOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17] wire [2:0] tlMasterClockXingIn_d_bits_sink = tlMasterClockXingOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17] wire tlMasterClockXingIn_d_bits_denied = tlMasterClockXingOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17] wire [63:0] tlMasterClockXingIn_d_bits_data = tlMasterClockXingOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17] wire tlMasterClockXingIn_d_bits_corrupt = tlMasterClockXingOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] wire tlMasterClockXingIn_e_valid; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_e_valid_0 = tlMasterClockXingOut_e_valid; // @[ClockDomain.scala:14:9] wire [2:0] tlMasterClockXingIn_e_bits_sink; // @[MixedNode.scala:551:17] assign auto_tl_master_clock_xing_out_e_bits_sink_0 = tlMasterClockXingOut_e_bits_sink; // @[ClockDomain.scala:14:9] assign tlMasterClockXingOut_a_valid = tlMasterClockXingIn_a_valid; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_a_bits_opcode = tlMasterClockXingIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_a_bits_param = tlMasterClockXingIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_a_bits_size = tlMasterClockXingIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_a_bits_source = tlMasterClockXingIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_a_bits_address = tlMasterClockXingIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_a_bits_mask = tlMasterClockXingIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_a_bits_data = tlMasterClockXingIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_a_bits_corrupt = tlMasterClockXingIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_b_ready = tlMasterClockXingIn_b_ready; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_c_valid = tlMasterClockXingIn_c_valid; // @[MixedNode.scala:542:17, :551:17] assign 
tlMasterClockXingOut_c_bits_opcode = tlMasterClockXingIn_c_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_c_bits_param = tlMasterClockXingIn_c_bits_param; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_c_bits_size = tlMasterClockXingIn_c_bits_size; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_c_bits_source = tlMasterClockXingIn_c_bits_source; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_c_bits_address = tlMasterClockXingIn_c_bits_address; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_c_bits_data = tlMasterClockXingIn_c_bits_data; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_c_bits_corrupt = tlMasterClockXingIn_c_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_d_ready = tlMasterClockXingIn_d_ready; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_e_valid = tlMasterClockXingIn_e_valid; // @[MixedNode.scala:542:17, :551:17] assign tlMasterClockXingOut_e_bits_sink = tlMasterClockXingIn_e_bits_sink; // @[MixedNode.scala:542:17, :551:17] wire intInClockXingOut_sync_0; // @[MixedNode.scala:542:17] wire intInClockXingOut_sync_1; // @[MixedNode.scala:542:17] assign intInClockXingOut_sync_0 = intInClockXingIn_sync_0; // @[MixedNode.scala:542:17, :551:17] assign intInClockXingOut_sync_1 = intInClockXingIn_sync_1; // @[MixedNode.scala:542:17, :551:17] wire intInClockXingOut_1_sync_0; // @[MixedNode.scala:542:17] assign intInClockXingOut_1_sync_0 = intInClockXingIn_1_sync_0; // @[MixedNode.scala:542:17, :551:17] wire intInClockXingOut_2_sync_0; // @[MixedNode.scala:542:17] assign intInClockXingOut_2_sync_0 = intInClockXingIn_2_sync_0; // @[MixedNode.scala:542:17, :551:17] wire intOutClockXingIn_2_sync_0; // @[MixedNode.scala:551:17] wire intOutClockXingOut_2_sync_0; // @[MixedNode.scala:542:17] wire intOutClockXingOut_3_sync_0; // @[MixedNode.scala:542:17] assign intOutClockXingOut_2_sync_0 = intOutClockXingIn_2_sync_0; // @[MixedNode.scala:542:17, :551:17] wire intOutClockXingIn_3_sync_0; // @[MixedNode.scala:551:17] assign intOutClockXingIn_2_sync_0 = intOutClockXingOut_3_sync_0; // @[MixedNode.scala:542:17, :551:17] assign intOutClockXingOut_3_sync_0 = intOutClockXingIn_3_sync_0; // @[MixedNode.scala:542:17, :551:17] RocketTile element_reset_domain_rockettile ( // @[HasTiles.scala:164:59] .clock (element_reset_domain_childClock), // @[LazyModuleImp.scala:155:31] .reset (element_reset_domain_childReset), // @[LazyModuleImp.scala:158:31] .auto_buffer_out_a_ready (element_reset_domain_auto_rockettile_buffer_out_a_ready), // @[ClockDomain.scala:14:9] .auto_buffer_out_a_valid (element_reset_domain_auto_rockettile_buffer_out_a_valid), .auto_buffer_out_a_bits_opcode (element_reset_domain_auto_rockettile_buffer_out_a_bits_opcode), .auto_buffer_out_a_bits_param (element_reset_domain_auto_rockettile_buffer_out_a_bits_param), .auto_buffer_out_a_bits_size (element_reset_domain_auto_rockettile_buffer_out_a_bits_size), .auto_buffer_out_a_bits_source (element_reset_domain_auto_rockettile_buffer_out_a_bits_source), .auto_buffer_out_a_bits_address (element_reset_domain_auto_rockettile_buffer_out_a_bits_address), .auto_buffer_out_a_bits_mask (element_reset_domain_auto_rockettile_buffer_out_a_bits_mask), .auto_buffer_out_a_bits_data (element_reset_domain_auto_rockettile_buffer_out_a_bits_data), .auto_buffer_out_b_ready (element_reset_domain_auto_rockettile_buffer_out_b_ready), .auto_buffer_out_b_valid 
(element_reset_domain_auto_rockettile_buffer_out_b_valid), // @[ClockDomain.scala:14:9] .auto_buffer_out_b_bits_opcode (element_reset_domain_auto_rockettile_buffer_out_b_bits_opcode), // @[ClockDomain.scala:14:9] .auto_buffer_out_b_bits_param (element_reset_domain_auto_rockettile_buffer_out_b_bits_param), // @[ClockDomain.scala:14:9] .auto_buffer_out_b_bits_size (element_reset_domain_auto_rockettile_buffer_out_b_bits_size), // @[ClockDomain.scala:14:9] .auto_buffer_out_b_bits_source (element_reset_domain_auto_rockettile_buffer_out_b_bits_source), // @[ClockDomain.scala:14:9] .auto_buffer_out_b_bits_address (element_reset_domain_auto_rockettile_buffer_out_b_bits_address), // @[ClockDomain.scala:14:9] .auto_buffer_out_b_bits_mask (element_reset_domain_auto_rockettile_buffer_out_b_bits_mask), // @[ClockDomain.scala:14:9] .auto_buffer_out_b_bits_data (element_reset_domain_auto_rockettile_buffer_out_b_bits_data), // @[ClockDomain.scala:14:9] .auto_buffer_out_b_bits_corrupt (element_reset_domain_auto_rockettile_buffer_out_b_bits_corrupt), // @[ClockDomain.scala:14:9] .auto_buffer_out_c_ready (element_reset_domain_auto_rockettile_buffer_out_c_ready), // @[ClockDomain.scala:14:9] .auto_buffer_out_c_valid (element_reset_domain_auto_rockettile_buffer_out_c_valid), .auto_buffer_out_c_bits_opcode (element_reset_domain_auto_rockettile_buffer_out_c_bits_opcode), .auto_buffer_out_c_bits_param (element_reset_domain_auto_rockettile_buffer_out_c_bits_param), .auto_buffer_out_c_bits_size (element_reset_domain_auto_rockettile_buffer_out_c_bits_size), .auto_buffer_out_c_bits_source (element_reset_domain_auto_rockettile_buffer_out_c_bits_source), .auto_buffer_out_c_bits_address (element_reset_domain_auto_rockettile_buffer_out_c_bits_address), .auto_buffer_out_c_bits_data (element_reset_domain_auto_rockettile_buffer_out_c_bits_data), .auto_buffer_out_d_ready (element_reset_domain_auto_rockettile_buffer_out_d_ready), .auto_buffer_out_d_valid (element_reset_domain_auto_rockettile_buffer_out_d_valid), // @[ClockDomain.scala:14:9] .auto_buffer_out_d_bits_opcode (element_reset_domain_auto_rockettile_buffer_out_d_bits_opcode), // @[ClockDomain.scala:14:9] .auto_buffer_out_d_bits_param (element_reset_domain_auto_rockettile_buffer_out_d_bits_param), // @[ClockDomain.scala:14:9] .auto_buffer_out_d_bits_size (element_reset_domain_auto_rockettile_buffer_out_d_bits_size), // @[ClockDomain.scala:14:9] .auto_buffer_out_d_bits_source (element_reset_domain_auto_rockettile_buffer_out_d_bits_source), // @[ClockDomain.scala:14:9] .auto_buffer_out_d_bits_sink (element_reset_domain_auto_rockettile_buffer_out_d_bits_sink), // @[ClockDomain.scala:14:9] .auto_buffer_out_d_bits_denied (element_reset_domain_auto_rockettile_buffer_out_d_bits_denied), // @[ClockDomain.scala:14:9] .auto_buffer_out_d_bits_data (element_reset_domain_auto_rockettile_buffer_out_d_bits_data), // @[ClockDomain.scala:14:9] .auto_buffer_out_d_bits_corrupt (element_reset_domain_auto_rockettile_buffer_out_d_bits_corrupt), // @[ClockDomain.scala:14:9] .auto_buffer_out_e_ready (element_reset_domain_auto_rockettile_buffer_out_e_ready), // @[ClockDomain.scala:14:9] .auto_buffer_out_e_valid (element_reset_domain_auto_rockettile_buffer_out_e_valid), .auto_buffer_out_e_bits_sink (element_reset_domain_auto_rockettile_buffer_out_e_bits_sink), .auto_wfi_out_0 (element_reset_domain_auto_rockettile_wfi_out_0), .auto_int_local_in_3_0 (element_reset_domain_auto_rockettile_int_local_in_3_0), // @[ClockDomain.scala:14:9] .auto_int_local_in_2_0 
(element_reset_domain_auto_rockettile_int_local_in_2_0), // @[ClockDomain.scala:14:9] .auto_int_local_in_1_0 (element_reset_domain_auto_rockettile_int_local_in_1_0), // @[ClockDomain.scala:14:9] .auto_int_local_in_1_1 (element_reset_domain_auto_rockettile_int_local_in_1_1), // @[ClockDomain.scala:14:9] .auto_int_local_in_0_0 (element_reset_domain_auto_rockettile_int_local_in_0_0), // @[ClockDomain.scala:14:9] .auto_trace_source_out_insns_0_valid (element_reset_domain_auto_rockettile_trace_source_out_insns_0_valid), .auto_trace_source_out_insns_0_iaddr (element_reset_domain_auto_rockettile_trace_source_out_insns_0_iaddr), .auto_trace_source_out_insns_0_insn (element_reset_domain_auto_rockettile_trace_source_out_insns_0_insn), .auto_trace_source_out_insns_0_priv (element_reset_domain_auto_rockettile_trace_source_out_insns_0_priv), .auto_trace_source_out_insns_0_exception (element_reset_domain_auto_rockettile_trace_source_out_insns_0_exception), .auto_trace_source_out_insns_0_interrupt (element_reset_domain_auto_rockettile_trace_source_out_insns_0_interrupt), .auto_trace_source_out_insns_0_cause (element_reset_domain_auto_rockettile_trace_source_out_insns_0_cause), .auto_trace_source_out_insns_0_tval (element_reset_domain_auto_rockettile_trace_source_out_insns_0_tval), .auto_trace_source_out_time (element_reset_domain_auto_rockettile_trace_source_out_time), .auto_hartid_in (element_reset_domain_auto_rockettile_hartid_in) // @[ClockDomain.scala:14:9] ); // @[HasTiles.scala:164:59] TLBuffer_a32d64s3k3z4c_1 buffer ( // @[Buffer.scala:75:28] .clock (childClock), // @[LazyModuleImp.scala:155:31] .reset (childReset), // @[LazyModuleImp.scala:158:31] .auto_in_a_ready (element_reset_domain_auto_rockettile_buffer_out_a_ready), .auto_in_a_valid (element_reset_domain_auto_rockettile_buffer_out_a_valid), // @[ClockDomain.scala:14:9] .auto_in_a_bits_opcode (element_reset_domain_auto_rockettile_buffer_out_a_bits_opcode), // @[ClockDomain.scala:14:9] .auto_in_a_bits_param (element_reset_domain_auto_rockettile_buffer_out_a_bits_param), // @[ClockDomain.scala:14:9] .auto_in_a_bits_size (element_reset_domain_auto_rockettile_buffer_out_a_bits_size), // @[ClockDomain.scala:14:9] .auto_in_a_bits_source (element_reset_domain_auto_rockettile_buffer_out_a_bits_source), // @[ClockDomain.scala:14:9] .auto_in_a_bits_address (element_reset_domain_auto_rockettile_buffer_out_a_bits_address), // @[ClockDomain.scala:14:9] .auto_in_a_bits_mask (element_reset_domain_auto_rockettile_buffer_out_a_bits_mask), // @[ClockDomain.scala:14:9] .auto_in_a_bits_data (element_reset_domain_auto_rockettile_buffer_out_a_bits_data), // @[ClockDomain.scala:14:9] .auto_in_b_ready (element_reset_domain_auto_rockettile_buffer_out_b_ready), // @[ClockDomain.scala:14:9] .auto_in_b_valid (element_reset_domain_auto_rockettile_buffer_out_b_valid), .auto_in_b_bits_opcode (element_reset_domain_auto_rockettile_buffer_out_b_bits_opcode), .auto_in_b_bits_param (element_reset_domain_auto_rockettile_buffer_out_b_bits_param), .auto_in_b_bits_size (element_reset_domain_auto_rockettile_buffer_out_b_bits_size), .auto_in_b_bits_source (element_reset_domain_auto_rockettile_buffer_out_b_bits_source), .auto_in_b_bits_address (element_reset_domain_auto_rockettile_buffer_out_b_bits_address), .auto_in_b_bits_mask (element_reset_domain_auto_rockettile_buffer_out_b_bits_mask), .auto_in_b_bits_data (element_reset_domain_auto_rockettile_buffer_out_b_bits_data), .auto_in_b_bits_corrupt (element_reset_domain_auto_rockettile_buffer_out_b_bits_corrupt), .auto_in_c_ready 
(element_reset_domain_auto_rockettile_buffer_out_c_ready), .auto_in_c_valid (element_reset_domain_auto_rockettile_buffer_out_c_valid), // @[ClockDomain.scala:14:9] .auto_in_c_bits_opcode (element_reset_domain_auto_rockettile_buffer_out_c_bits_opcode), // @[ClockDomain.scala:14:9] .auto_in_c_bits_param (element_reset_domain_auto_rockettile_buffer_out_c_bits_param), // @[ClockDomain.scala:14:9] .auto_in_c_bits_size (element_reset_domain_auto_rockettile_buffer_out_c_bits_size), // @[ClockDomain.scala:14:9] .auto_in_c_bits_source (element_reset_domain_auto_rockettile_buffer_out_c_bits_source), // @[ClockDomain.scala:14:9] .auto_in_c_bits_address (element_reset_domain_auto_rockettile_buffer_out_c_bits_address), // @[ClockDomain.scala:14:9] .auto_in_c_bits_data (element_reset_domain_auto_rockettile_buffer_out_c_bits_data), // @[ClockDomain.scala:14:9] .auto_in_d_ready (element_reset_domain_auto_rockettile_buffer_out_d_ready), // @[ClockDomain.scala:14:9] .auto_in_d_valid (element_reset_domain_auto_rockettile_buffer_out_d_valid), .auto_in_d_bits_opcode (element_reset_domain_auto_rockettile_buffer_out_d_bits_opcode), .auto_in_d_bits_param (element_reset_domain_auto_rockettile_buffer_out_d_bits_param), .auto_in_d_bits_size (element_reset_domain_auto_rockettile_buffer_out_d_bits_size), .auto_in_d_bits_source (element_reset_domain_auto_rockettile_buffer_out_d_bits_source), .auto_in_d_bits_sink (element_reset_domain_auto_rockettile_buffer_out_d_bits_sink), .auto_in_d_bits_denied (element_reset_domain_auto_rockettile_buffer_out_d_bits_denied), .auto_in_d_bits_data (element_reset_domain_auto_rockettile_buffer_out_d_bits_data), .auto_in_d_bits_corrupt (element_reset_domain_auto_rockettile_buffer_out_d_bits_corrupt), .auto_in_e_ready (element_reset_domain_auto_rockettile_buffer_out_e_ready), .auto_in_e_valid (element_reset_domain_auto_rockettile_buffer_out_e_valid), // @[ClockDomain.scala:14:9] .auto_in_e_bits_sink (element_reset_domain_auto_rockettile_buffer_out_e_bits_sink), // @[ClockDomain.scala:14:9] .auto_out_a_ready (tlMasterClockXingIn_a_ready), // @[MixedNode.scala:551:17] .auto_out_a_valid (tlMasterClockXingIn_a_valid), .auto_out_a_bits_opcode (tlMasterClockXingIn_a_bits_opcode), .auto_out_a_bits_param (tlMasterClockXingIn_a_bits_param), .auto_out_a_bits_size (tlMasterClockXingIn_a_bits_size), .auto_out_a_bits_source (tlMasterClockXingIn_a_bits_source), .auto_out_a_bits_address (tlMasterClockXingIn_a_bits_address), .auto_out_a_bits_mask (tlMasterClockXingIn_a_bits_mask), .auto_out_a_bits_data (tlMasterClockXingIn_a_bits_data), .auto_out_a_bits_corrupt (tlMasterClockXingIn_a_bits_corrupt), .auto_out_b_ready (tlMasterClockXingIn_b_ready), .auto_out_b_valid (tlMasterClockXingIn_b_valid), // @[MixedNode.scala:551:17] .auto_out_b_bits_param (tlMasterClockXingIn_b_bits_param), // @[MixedNode.scala:551:17] .auto_out_b_bits_address (tlMasterClockXingIn_b_bits_address), // @[MixedNode.scala:551:17] .auto_out_c_ready (tlMasterClockXingIn_c_ready), // @[MixedNode.scala:551:17] .auto_out_c_valid (tlMasterClockXingIn_c_valid), .auto_out_c_bits_opcode (tlMasterClockXingIn_c_bits_opcode), .auto_out_c_bits_param (tlMasterClockXingIn_c_bits_param), .auto_out_c_bits_size (tlMasterClockXingIn_c_bits_size), .auto_out_c_bits_source (tlMasterClockXingIn_c_bits_source), .auto_out_c_bits_address (tlMasterClockXingIn_c_bits_address), .auto_out_c_bits_data (tlMasterClockXingIn_c_bits_data), .auto_out_c_bits_corrupt (tlMasterClockXingIn_c_bits_corrupt), .auto_out_d_ready (tlMasterClockXingIn_d_ready), .auto_out_d_valid 
(tlMasterClockXingIn_d_valid), // @[MixedNode.scala:551:17] .auto_out_d_bits_opcode (tlMasterClockXingIn_d_bits_opcode), // @[MixedNode.scala:551:17] .auto_out_d_bits_param (tlMasterClockXingIn_d_bits_param), // @[MixedNode.scala:551:17] .auto_out_d_bits_size (tlMasterClockXingIn_d_bits_size), // @[MixedNode.scala:551:17] .auto_out_d_bits_source (tlMasterClockXingIn_d_bits_source), // @[MixedNode.scala:551:17] .auto_out_d_bits_sink (tlMasterClockXingIn_d_bits_sink), // @[MixedNode.scala:551:17] .auto_out_d_bits_denied (tlMasterClockXingIn_d_bits_denied), // @[MixedNode.scala:551:17] .auto_out_d_bits_data (tlMasterClockXingIn_d_bits_data), // @[MixedNode.scala:551:17] .auto_out_d_bits_corrupt (tlMasterClockXingIn_d_bits_corrupt), // @[MixedNode.scala:551:17] .auto_out_e_valid (tlMasterClockXingIn_e_valid), .auto_out_e_bits_sink (tlMasterClockXingIn_e_bits_sink) ); // @[Buffer.scala:75:28] TLBuffer_2 buffer_1 ( // @[Buffer.scala:75:28] .clock (childClock), // @[LazyModuleImp.scala:155:31] .reset (childReset) // @[LazyModuleImp.scala:158:31] ); // @[Buffer.scala:75:28] IntSyncAsyncCrossingSink_n1x1 intsink ( // @[Crossing.scala:86:29] .clock (childClock), // @[LazyModuleImp.scala:155:31] .reset (childReset), // @[LazyModuleImp.scala:158:31] .auto_in_sync_0 (auto_intsink_in_sync_0_0), // @[ClockDomain.scala:14:9] .auto_out_0 (element_reset_domain_auto_rockettile_int_local_in_0_0) ); // @[Crossing.scala:86:29] IntSyncSyncCrossingSink_n1x2 intsink_1 ( // @[Crossing.scala:109:29] .auto_in_sync_0 (intInClockXingOut_sync_0), // @[MixedNode.scala:542:17] .auto_in_sync_1 (intInClockXingOut_sync_1), // @[MixedNode.scala:542:17] .auto_out_0 (element_reset_domain_auto_rockettile_int_local_in_1_0), .auto_out_1 (element_reset_domain_auto_rockettile_int_local_in_1_1) ); // @[Crossing.scala:109:29] IntSyncSyncCrossingSink_n1x1 intsink_2 ( // @[Crossing.scala:109:29] .auto_in_sync_0 (intInClockXingOut_1_sync_0), // @[MixedNode.scala:542:17] .auto_out_0 (element_reset_domain_auto_rockettile_int_local_in_2_0) ); // @[Crossing.scala:109:29] IntSyncSyncCrossingSink_n1x1_1 intsink_3 ( // @[Crossing.scala:109:29] .auto_in_sync_0 (intInClockXingOut_2_sync_0), // @[MixedNode.scala:542:17] .auto_out_0 (element_reset_domain_auto_rockettile_int_local_in_3_0) ); // @[Crossing.scala:109:29] IntSyncSyncCrossingSink_n1x1_2 intsink_4 (); // @[Crossing.scala:109:29] IntSyncCrossingSource_n1x1 intsource ( // @[Crossing.scala:29:31] .clock (childClock), // @[LazyModuleImp.scala:155:31] .reset (childReset) // @[LazyModuleImp.scala:158:31] ); // @[Crossing.scala:29:31] IntSyncSyncCrossingSink_n1x1_3 intsink_5 ( // @[Crossing.scala:109:29] .auto_in_sync_0 (intOutClockXingOut_2_sync_0), // @[MixedNode.scala:542:17] .auto_out_0 (auto_intsink_out_1_0_0) ); // @[Crossing.scala:109:29] IntSyncCrossingSource_n1x1_1 intsource_1 ( // @[Crossing.scala:29:31] .clock (childClock), // @[LazyModuleImp.scala:155:31] .reset (childReset), // @[LazyModuleImp.scala:158:31] .auto_in_0 (element_reset_domain_auto_rockettile_wfi_out_0), // @[ClockDomain.scala:14:9] .auto_out_sync_0 (intOutClockXingIn_3_sync_0) ); // @[Crossing.scala:29:31] IntSyncSyncCrossingSink_n1x1_4 intsink_6 (); // @[Crossing.scala:109:29] IntSyncCrossingSource_n1x1_2 intsource_2 ( // @[Crossing.scala:29:31] .clock (childClock), // @[LazyModuleImp.scala:155:31] .reset (childReset) // @[LazyModuleImp.scala:158:31] ); // @[Crossing.scala:29:31] assign auto_intsink_out_1_0 = auto_intsink_out_1_0_0; // @[ClockDomain.scala:14:9] assign 
auto_element_reset_domain_rockettile_trace_source_out_insns_0_valid = auto_element_reset_domain_rockettile_trace_source_out_insns_0_valid_0; // @[ClockDomain.scala:14:9] assign auto_element_reset_domain_rockettile_trace_source_out_insns_0_iaddr = auto_element_reset_domain_rockettile_trace_source_out_insns_0_iaddr_0; // @[ClockDomain.scala:14:9] assign auto_element_reset_domain_rockettile_trace_source_out_insns_0_insn = auto_element_reset_domain_rockettile_trace_source_out_insns_0_insn_0; // @[ClockDomain.scala:14:9] assign auto_element_reset_domain_rockettile_trace_source_out_insns_0_priv = auto_element_reset_domain_rockettile_trace_source_out_insns_0_priv_0; // @[ClockDomain.scala:14:9] assign auto_element_reset_domain_rockettile_trace_source_out_insns_0_exception = auto_element_reset_domain_rockettile_trace_source_out_insns_0_exception_0; // @[ClockDomain.scala:14:9] assign auto_element_reset_domain_rockettile_trace_source_out_insns_0_interrupt = auto_element_reset_domain_rockettile_trace_source_out_insns_0_interrupt_0; // @[ClockDomain.scala:14:9] assign auto_element_reset_domain_rockettile_trace_source_out_insns_0_cause = auto_element_reset_domain_rockettile_trace_source_out_insns_0_cause_0; // @[ClockDomain.scala:14:9] assign auto_element_reset_domain_rockettile_trace_source_out_insns_0_tval = auto_element_reset_domain_rockettile_trace_source_out_insns_0_tval_0; // @[ClockDomain.scala:14:9] assign auto_element_reset_domain_rockettile_trace_source_out_time = auto_element_reset_domain_rockettile_trace_source_out_time_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_a_valid = auto_tl_master_clock_xing_out_a_valid_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_a_bits_opcode = auto_tl_master_clock_xing_out_a_bits_opcode_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_a_bits_param = auto_tl_master_clock_xing_out_a_bits_param_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_a_bits_size = auto_tl_master_clock_xing_out_a_bits_size_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_a_bits_source = auto_tl_master_clock_xing_out_a_bits_source_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_a_bits_address = auto_tl_master_clock_xing_out_a_bits_address_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_a_bits_mask = auto_tl_master_clock_xing_out_a_bits_mask_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_a_bits_data = auto_tl_master_clock_xing_out_a_bits_data_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_a_bits_corrupt = auto_tl_master_clock_xing_out_a_bits_corrupt_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_b_ready = auto_tl_master_clock_xing_out_b_ready_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_c_valid = auto_tl_master_clock_xing_out_c_valid_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_c_bits_opcode = auto_tl_master_clock_xing_out_c_bits_opcode_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_c_bits_param = auto_tl_master_clock_xing_out_c_bits_param_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_c_bits_size = auto_tl_master_clock_xing_out_c_bits_size_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_c_bits_source = auto_tl_master_clock_xing_out_c_bits_source_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_c_bits_address = 
auto_tl_master_clock_xing_out_c_bits_address_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_c_bits_data = auto_tl_master_clock_xing_out_c_bits_data_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_c_bits_corrupt = auto_tl_master_clock_xing_out_c_bits_corrupt_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_d_ready = auto_tl_master_clock_xing_out_d_ready_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_e_valid = auto_tl_master_clock_xing_out_e_valid_0; // @[ClockDomain.scala:14:9] assign auto_tl_master_clock_xing_out_e_bits_sink = auto_tl_master_clock_xing_out_e_bits_sink_0; // @[ClockDomain.scala:14:9] endmodule
Generate the Verilog code corresponding to the following Chisel files. File IngressUnit.scala: package constellation.router import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.{Field, Parameters} import freechips.rocketchip.util._ import constellation.channel._ class IngressUnit( ingressNodeId: Int, cParam: IngressChannelParams, outParams: Seq[ChannelParams], egressParams: Seq[EgressChannelParams], combineRCVA: Boolean, combineSAST: Boolean, ) (implicit p: Parameters) extends AbstractInputUnit(cParam, outParams, egressParams)(p) { class IngressUnitIO extends AbstractInputUnitIO(cParam, outParams, egressParams) { val in = Flipped(Decoupled(new IngressFlit(cParam.payloadBits))) } val io = IO(new IngressUnitIO) val route_buffer = Module(new Queue(new Flit(cParam.payloadBits), 2)) val route_q = Module(new Queue(new RouteComputerResp(outParams, egressParams), 2, flow=combineRCVA)) assert(!(io.in.valid && !cParam.possibleFlows.toSeq.map(_.egressId.U === io.in.bits.egress_id).orR)) route_buffer.io.enq.bits.head := io.in.bits.head route_buffer.io.enq.bits.tail := io.in.bits.tail val flows = cParam.possibleFlows.toSeq if (flows.size == 0) { route_buffer.io.enq.bits.flow := DontCare } else { route_buffer.io.enq.bits.flow.ingress_node := cParam.destId.U route_buffer.io.enq.bits.flow.ingress_node_id := ingressNodeId.U route_buffer.io.enq.bits.flow.vnet_id := cParam.vNetId.U route_buffer.io.enq.bits.flow.egress_node := Mux1H( flows.map(_.egressId.U === io.in.bits.egress_id), flows.map(_.egressNode.U) ) route_buffer.io.enq.bits.flow.egress_node_id := Mux1H( flows.map(_.egressId.U === io.in.bits.egress_id), flows.map(_.egressNodeId.U) ) } route_buffer.io.enq.bits.payload := io.in.bits.payload route_buffer.io.enq.bits.virt_channel_id := DontCare io.router_req.bits.src_virt_id := 0.U io.router_req.bits.flow := route_buffer.io.enq.bits.flow val at_dest = route_buffer.io.enq.bits.flow.egress_node === nodeId.U route_buffer.io.enq.valid := io.in.valid && ( io.router_req.ready || !io.in.bits.head || at_dest) io.router_req.valid := io.in.valid && route_buffer.io.enq.ready && io.in.bits.head && !at_dest io.in.ready := route_buffer.io.enq.ready && ( io.router_req.ready || !io.in.bits.head || at_dest) route_q.io.enq.valid := io.router_req.fire route_q.io.enq.bits := io.router_resp when (io.in.fire && io.in.bits.head && at_dest) { route_q.io.enq.valid := true.B route_q.io.enq.bits.vc_sel.foreach(_.foreach(_ := false.B)) for (o <- 0 until nEgress) { when (egressParams(o).egressId.U === io.in.bits.egress_id) { route_q.io.enq.bits.vc_sel(o+nOutputs)(0) := true.B } } } assert(!(route_q.io.enq.valid && !route_q.io.enq.ready)) val vcalloc_buffer = Module(new Queue(new Flit(cParam.payloadBits), 2)) val vcalloc_q = Module(new Queue(new VCAllocResp(outParams, egressParams), 1, pipe=true)) vcalloc_buffer.io.enq.bits := route_buffer.io.deq.bits io.vcalloc_req.bits.vc_sel := route_q.io.deq.bits.vc_sel io.vcalloc_req.bits.flow := route_buffer.io.deq.bits.flow io.vcalloc_req.bits.in_vc := 0.U val head = route_buffer.io.deq.bits.head val tail = route_buffer.io.deq.bits.tail vcalloc_buffer.io.enq.valid := (route_buffer.io.deq.valid && (route_q.io.deq.valid || !head) && (io.vcalloc_req.ready || !head) ) io.vcalloc_req.valid := (route_buffer.io.deq.valid && route_q.io.deq.valid && head && vcalloc_buffer.io.enq.ready && vcalloc_q.io.enq.ready) route_buffer.io.deq.ready := (vcalloc_buffer.io.enq.ready && (route_q.io.deq.valid || !head) && (io.vcalloc_req.ready || !head) && (vcalloc_q.io.enq.ready || !head)) 
route_q.io.deq.ready := (route_buffer.io.deq.fire && tail) vcalloc_q.io.enq.valid := io.vcalloc_req.fire vcalloc_q.io.enq.bits := io.vcalloc_resp assert(!(vcalloc_q.io.enq.valid && !vcalloc_q.io.enq.ready)) io.salloc_req(0).bits.vc_sel := vcalloc_q.io.deq.bits.vc_sel io.salloc_req(0).bits.tail := vcalloc_buffer.io.deq.bits.tail val c = (vcalloc_q.io.deq.bits.vc_sel.asUInt & io.out_credit_available.asUInt) =/= 0.U val vcalloc_tail = vcalloc_buffer.io.deq.bits.tail io.salloc_req(0).valid := vcalloc_buffer.io.deq.valid && vcalloc_q.io.deq.valid && c && !io.block vcalloc_buffer.io.deq.ready := io.salloc_req(0).ready && vcalloc_q.io.deq.valid && c && !io.block vcalloc_q.io.deq.ready := vcalloc_tail && vcalloc_buffer.io.deq.fire val out_bundle = if (combineSAST) { Wire(Valid(new SwitchBundle(outParams, egressParams))) } else { Reg(Valid(new SwitchBundle(outParams, egressParams))) } io.out(0) := out_bundle out_bundle.valid := vcalloc_buffer.io.deq.fire out_bundle.bits.flit := vcalloc_buffer.io.deq.bits out_bundle.bits.flit.virt_channel_id := 0.U val out_channel_oh = vcalloc_q.io.deq.bits.vc_sel.map(_.reduce(_||_)).toSeq out_bundle.bits.out_virt_channel := Mux1H(out_channel_oh, vcalloc_q.io.deq.bits.vc_sel.map(v => OHToUInt(v)).toSeq) io.debug.va_stall := io.vcalloc_req.valid && !io.vcalloc_req.ready io.debug.sa_stall := io.salloc_req(0).valid && !io.salloc_req(0).ready // TODO: We should not generate input/ingress/output/egress units for untraversable channels if (!cParam.traversable) { io.in.ready := false.B io.router_req.valid := false.B io.router_req.bits := DontCare io.vcalloc_req.valid := false.B io.vcalloc_req.bits := DontCare io.salloc_req.foreach(_.valid := false.B) io.salloc_req.foreach(_.bits := DontCare) io.out.foreach(_.valid := false.B) io.out.foreach(_.bits := DontCare) } }
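A note on the Queue options used above, before the corresponding Verilog: route_q is built with flow=combineRCVA and vcalloc_q with pipe=true, which is what lets a route-compute response be consumed in the same cycle it arrives and lets the single-entry VC-allocation queue sustain back-to-back grants. The sketch below is illustrative only and is not part of the generated design; QueueOptionsSketch and its 8-bit payload are made-up names used to show the chisel3.util.Queue flow/pipe semantics in isolation.

import chisel3._
import chisel3.util._

// Illustrative only: the two Queue options IngressUnit relies on.
class QueueOptionsSketch extends Module {
  val io = IO(new Bundle {
    val enq = Flipped(Decoupled(UInt(8.W)))
    val deq = Decoupled(UInt(8.W))
  })
  // flow = true: when empty, an enqueued element can appear on deq in the same cycle
  // (the role combineRCVA gives to route_q above).
  val flowQ = Module(new Queue(UInt(8.W), 2, flow = true))
  // pipe = true: a full one-entry queue can accept a new element in the cycle its
  // occupant is dequeued (the role of vcalloc_q above).
  val pipeQ = Module(new Queue(UInt(8.W), 1, pipe = true))
  flowQ.io.enq <> io.enq
  pipeQ.io.enq <> flowQ.io.deq
  io.deq <> pipeQ.io.deq
}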
module IngressUnit_13( // @[IngressUnit.scala:11:7] input clock, // @[IngressUnit.scala:11:7] input reset, // @[IngressUnit.scala:11:7] input io_vcalloc_req_ready, // @[IngressUnit.scala:24:14] output io_vcalloc_req_valid, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_2_0, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_1_0, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_0, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_1, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_2, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_3, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_4, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_5, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_6, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_7, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_2_0, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_1_0, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_0, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_1, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_2, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_3, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_4, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_5, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_6, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_7, // @[IngressUnit.scala:24:14] input io_out_credit_available_2_0, // @[IngressUnit.scala:24:14] input io_out_credit_available_1_0, // @[IngressUnit.scala:24:14] input io_out_credit_available_0_0, // @[IngressUnit.scala:24:14] input io_out_credit_available_0_1, // @[IngressUnit.scala:24:14] input io_out_credit_available_0_2, // @[IngressUnit.scala:24:14] input io_out_credit_available_0_3, // @[IngressUnit.scala:24:14] input io_out_credit_available_0_4, // @[IngressUnit.scala:24:14] input io_out_credit_available_0_5, // @[IngressUnit.scala:24:14] input io_out_credit_available_0_6, // @[IngressUnit.scala:24:14] input io_out_credit_available_0_7, // @[IngressUnit.scala:24:14] input io_salloc_req_0_ready, // @[IngressUnit.scala:24:14] output io_salloc_req_0_valid, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_2_0, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_1_0, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_0, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_1, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_2, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_3, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_4, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_5, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_6, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_7, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_tail, // @[IngressUnit.scala:24:14] output io_out_0_valid, // @[IngressUnit.scala:24:14] output io_out_0_bits_flit_head, // @[IngressUnit.scala:24:14] output io_out_0_bits_flit_tail, // @[IngressUnit.scala:24:14] output [72:0] io_out_0_bits_flit_payload, // @[IngressUnit.scala:24:14] output [2:0] io_out_0_bits_flit_flow_vnet_id, // @[IngressUnit.scala:24:14] output [4:0] io_out_0_bits_flit_flow_ingress_node, // @[IngressUnit.scala:24:14] output [1:0] 
io_out_0_bits_flit_flow_ingress_node_id, // @[IngressUnit.scala:24:14] output [4:0] io_out_0_bits_flit_flow_egress_node, // @[IngressUnit.scala:24:14] output [1:0] io_out_0_bits_flit_flow_egress_node_id, // @[IngressUnit.scala:24:14] output [2:0] io_out_0_bits_out_virt_channel, // @[IngressUnit.scala:24:14] output io_in_ready, // @[IngressUnit.scala:24:14] input io_in_valid, // @[IngressUnit.scala:24:14] input io_in_bits_head, // @[IngressUnit.scala:24:14] input [72:0] io_in_bits_payload, // @[IngressUnit.scala:24:14] input [5:0] io_in_bits_egress_id // @[IngressUnit.scala:24:14] ); wire _GEN; // @[Decoupled.scala:51:35] wire _vcalloc_q_io_enq_ready; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_valid; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_2_0; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_1_0; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_0; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_1; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_2; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_3; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_4; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_5; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_6; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_7; // @[IngressUnit.scala:76:25] wire _vcalloc_buffer_io_enq_ready; // @[IngressUnit.scala:75:30] wire _vcalloc_buffer_io_deq_valid; // @[IngressUnit.scala:75:30] wire _vcalloc_buffer_io_deq_bits_head; // @[IngressUnit.scala:75:30] wire _vcalloc_buffer_io_deq_bits_tail; // @[IngressUnit.scala:75:30] wire [72:0] _vcalloc_buffer_io_deq_bits_payload; // @[IngressUnit.scala:75:30] wire [2:0] _vcalloc_buffer_io_deq_bits_flow_vnet_id; // @[IngressUnit.scala:75:30] wire [4:0] _vcalloc_buffer_io_deq_bits_flow_ingress_node; // @[IngressUnit.scala:75:30] wire [1:0] _vcalloc_buffer_io_deq_bits_flow_ingress_node_id; // @[IngressUnit.scala:75:30] wire [4:0] _vcalloc_buffer_io_deq_bits_flow_egress_node; // @[IngressUnit.scala:75:30] wire [1:0] _vcalloc_buffer_io_deq_bits_flow_egress_node_id; // @[IngressUnit.scala:75:30] wire _route_q_io_enq_ready; // @[IngressUnit.scala:27:23] wire _route_q_io_deq_valid; // @[IngressUnit.scala:27:23] wire _route_buffer_io_enq_ready; // @[IngressUnit.scala:26:28] wire _route_buffer_io_deq_valid; // @[IngressUnit.scala:26:28] wire _route_buffer_io_deq_bits_head; // @[IngressUnit.scala:26:28] wire _route_buffer_io_deq_bits_tail; // @[IngressUnit.scala:26:28] wire [72:0] _route_buffer_io_deq_bits_payload; // @[IngressUnit.scala:26:28] wire [2:0] _route_buffer_io_deq_bits_flow_vnet_id; // @[IngressUnit.scala:26:28] wire [4:0] _route_buffer_io_deq_bits_flow_ingress_node; // @[IngressUnit.scala:26:28] wire [1:0] _route_buffer_io_deq_bits_flow_ingress_node_id; // @[IngressUnit.scala:26:28] wire [4:0] _route_buffer_io_deq_bits_flow_egress_node; // @[IngressUnit.scala:26:28] wire [1:0] _route_buffer_io_deq_bits_flow_egress_node_id; // @[IngressUnit.scala:26:28] wire [2:0] _route_buffer_io_deq_bits_virt_channel_id; // @[IngressUnit.scala:26:28] wire _route_buffer_io_enq_bits_flow_egress_node_id_T = io_in_bits_egress_id == 6'h17; // @[IngressUnit.scala:30:72] wire _route_buffer_io_enq_bits_flow_egress_node_id_T_1 = io_in_bits_egress_id == 6'h1A; // @[IngressUnit.scala:30:72] wire _route_buffer_io_enq_bits_flow_egress_node_id_T_2 = io_in_bits_egress_id == 6'h1D; // @[IngressUnit.scala:30:72] 
wire _route_buffer_io_enq_bits_flow_egress_node_id_T_3 = io_in_bits_egress_id == 6'h20; // @[IngressUnit.scala:30:72] wire [3:0] _route_buffer_io_enq_bits_flow_egress_node_T_10 = {1'h0, (_route_buffer_io_enq_bits_flow_egress_node_id_T ? 3'h5 : 3'h0) | (_route_buffer_io_enq_bits_flow_egress_node_id_T_1 ? 3'h6 : 3'h0)} | (_route_buffer_io_enq_bits_flow_egress_node_id_T_2 ? 4'h9 : 4'h0) | (_route_buffer_io_enq_bits_flow_egress_node_id_T_3 ? 4'hA : 4'h0); // @[Mux.scala:30:73] assign _GEN = _route_buffer_io_enq_ready & io_in_valid & io_in_bits_head & _route_buffer_io_enq_bits_flow_egress_node_T_10 == 4'h4; // @[Mux.scala:30:73] wire route_q_io_enq_valid = _GEN | io_in_valid & _route_buffer_io_enq_ready & io_in_bits_head & _route_buffer_io_enq_bits_flow_egress_node_T_10 != 4'h4; // @[Mux.scala:30:73] wire io_vcalloc_req_valid_0 = _route_buffer_io_deq_valid & _route_q_io_deq_valid & _route_buffer_io_deq_bits_head & _vcalloc_buffer_io_enq_ready & _vcalloc_q_io_enq_ready; // @[IngressUnit.scala:26:28, :27:23, :75:30, :76:25, :91:{54,78}, :92:{10,41}] wire route_buffer_io_deq_ready = _vcalloc_buffer_io_enq_ready & (_route_q_io_deq_valid | ~_route_buffer_io_deq_bits_head) & (io_vcalloc_req_ready | ~_route_buffer_io_deq_bits_head) & (_vcalloc_q_io_enq_ready | ~_route_buffer_io_deq_bits_head); // @[IngressUnit.scala:26:28, :27:23, :75:30, :76:25, :88:30, :93:61, :94:{27,37}, :95:{27,37}, :96:29] wire vcalloc_q_io_enq_valid = io_vcalloc_req_ready & io_vcalloc_req_valid_0; // @[Decoupled.scala:51:35]
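The and-or chain of ?: terms in the emitted code above (_route_buffer_io_enq_bits_flow_egress_node_T_10) is the usual lowering of the Mux1H over cParam.possibleFlows in the Chisel source: each egress-id match selects one constant egress node and the results are ORed, relying on the selects being one-hot. A minimal sketch of that idiom follows; Mux1HSketch is a made-up name and the (id, node) pairs are simply read off the constants in the Verilog above.

import chisel3._
import chisel3.util._

// Illustrative only: one-hot select lowering to (sel ? v : 0) | ... as seen above.
class Mux1HSketch extends Module {
  val io = IO(new Bundle {
    val egressId   = Input(UInt(6.W))
    val egressNode = Output(UInt(4.W))
  })
  // (egress_id, egress_node) pairs read from the emitted constants:
  // 0x17 -> 5, 0x1A -> 6, 0x1D -> 9, 0x20 -> 10.
  val table = Seq(0x17 -> 5, 0x1A -> 6, 0x1D -> 9, 0x20 -> 10)
  io.egressNode := Mux1H(
    table.map { case (id, _)   => io.egressId === id.U },
    table.map { case (_, node) => node.U(4.W) }
  )
}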
Generate the Verilog code corresponding to the following Chisel files. File InputUnit.scala: package constellation.router import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.{Field, Parameters} import freechips.rocketchip.util._ import constellation.channel._ import constellation.routing.{FlowRoutingBundle} import constellation.noc.{HasNoCParams} class AbstractInputUnitIO( val cParam: BaseChannelParams, val outParams: Seq[ChannelParams], val egressParams: Seq[EgressChannelParams], )(implicit val p: Parameters) extends Bundle with HasRouterOutputParams { val nodeId = cParam.destId val router_req = Decoupled(new RouteComputerReq) val router_resp = Input(new RouteComputerResp(outParams, egressParams)) val vcalloc_req = Decoupled(new VCAllocReq(cParam, outParams, egressParams)) val vcalloc_resp = Input(new VCAllocResp(outParams, egressParams)) val out_credit_available = Input(MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })) val salloc_req = Vec(cParam.destSpeedup, Decoupled(new SwitchAllocReq(outParams, egressParams))) val out = Vec(cParam.destSpeedup, Valid(new SwitchBundle(outParams, egressParams))) val debug = Output(new Bundle { val va_stall = UInt(log2Ceil(cParam.nVirtualChannels).W) val sa_stall = UInt(log2Ceil(cParam.nVirtualChannels).W) }) val block = Input(Bool()) } abstract class AbstractInputUnit( val cParam: BaseChannelParams, val outParams: Seq[ChannelParams], val egressParams: Seq[EgressChannelParams] )(implicit val p: Parameters) extends Module with HasRouterOutputParams with HasNoCParams { val nodeId = cParam.destId def io: AbstractInputUnitIO } class InputBuffer(cParam: ChannelParams)(implicit p: Parameters) extends Module { val nVirtualChannels = cParam.nVirtualChannels val io = IO(new Bundle { val enq = Flipped(Vec(cParam.srcSpeedup, Valid(new Flit(cParam.payloadBits)))) val deq = Vec(cParam.nVirtualChannels, Decoupled(new BaseFlit(cParam.payloadBits))) }) val useOutputQueues = cParam.useOutputQueues val delims = if (useOutputQueues) { cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize else 0).scanLeft(0)(_+_) } else { // If no queuing, have to add an additional slot since head == tail implies empty // TODO this should be fixed, should use all slots available cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize + 1 else 0).scanLeft(0)(_+_) } val starts = delims.dropRight(1).zipWithIndex.map { case (s,i) => if (cParam.virtualChannelParams(i).traversable) s else 0 } val ends = delims.tail.zipWithIndex.map { case (s,i) => if (cParam.virtualChannelParams(i).traversable) s else 0 } val fullSize = delims.last // Ugly case. 
Use multiple queues if ((cParam.srcSpeedup > 1 || cParam.destSpeedup > 1 || fullSize <= 1) || !cParam.unifiedBuffer) { require(useOutputQueues) val qs = cParam.virtualChannelParams.map(v => Module(new Queue(new BaseFlit(cParam.payloadBits), v.bufferSize))) qs.zipWithIndex.foreach { case (q,i) => val sel = io.enq.map(f => f.valid && f.bits.virt_channel_id === i.U) q.io.enq.valid := sel.orR q.io.enq.bits.head := Mux1H(sel, io.enq.map(_.bits.head)) q.io.enq.bits.tail := Mux1H(sel, io.enq.map(_.bits.tail)) q.io.enq.bits.payload := Mux1H(sel, io.enq.map(_.bits.payload)) io.deq(i) <> q.io.deq } } else { val mem = Mem(fullSize, new BaseFlit(cParam.payloadBits)) val heads = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W)))) val tails = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W)))) val empty = (heads zip tails).map(t => t._1 === t._2) val qs = Seq.fill(nVirtualChannels) { Module(new Queue(new BaseFlit(cParam.payloadBits), 1, pipe=true)) } qs.foreach(_.io.enq.valid := false.B) qs.foreach(_.io.enq.bits := DontCare) val vc_sel = UIntToOH(io.enq(0).bits.virt_channel_id) val flit = Wire(new BaseFlit(cParam.payloadBits)) val direct_to_q = (Mux1H(vc_sel, qs.map(_.io.enq.ready)) && Mux1H(vc_sel, empty)) && useOutputQueues.B flit.head := io.enq(0).bits.head flit.tail := io.enq(0).bits.tail flit.payload := io.enq(0).bits.payload when (io.enq(0).valid && !direct_to_q) { val tail = tails(io.enq(0).bits.virt_channel_id) mem.write(tail, flit) tails(io.enq(0).bits.virt_channel_id) := Mux( tail === Mux1H(vc_sel, ends.map(_ - 1).map(_ max 0).map(_.U)), Mux1H(vc_sel, starts.map(_.U)), tail + 1.U) } .elsewhen (io.enq(0).valid && direct_to_q) { for (i <- 0 until nVirtualChannels) { when (io.enq(0).bits.virt_channel_id === i.U) { qs(i).io.enq.valid := true.B qs(i).io.enq.bits := flit } } } if (useOutputQueues) { val can_to_q = (0 until nVirtualChannels).map { i => !empty(i) && qs(i).io.enq.ready } val to_q_oh = PriorityEncoderOH(can_to_q) val to_q = OHToUInt(to_q_oh) when (can_to_q.orR) { val head = Mux1H(to_q_oh, heads) heads(to_q) := Mux( head === Mux1H(to_q_oh, ends.map(_ - 1).map(_ max 0).map(_.U)), Mux1H(to_q_oh, starts.map(_.U)), head + 1.U) for (i <- 0 until nVirtualChannels) { when (to_q_oh(i)) { qs(i).io.enq.valid := true.B qs(i).io.enq.bits := mem.read(head) } } } for (i <- 0 until nVirtualChannels) { io.deq(i) <> qs(i).io.deq } } else { qs.map(_.io.deq.ready := false.B) val ready_sel = io.deq.map(_.ready) val fire = io.deq.map(_.fire) assert(PopCount(fire) <= 1.U) val head = Mux1H(fire, heads) when (fire.orR) { val fire_idx = OHToUInt(fire) heads(fire_idx) := Mux( head === Mux1H(fire, ends.map(_ - 1).map(_ max 0).map(_.U)), Mux1H(fire, starts.map(_.U)), head + 1.U) } val read_flit = mem.read(head) for (i <- 0 until nVirtualChannels) { io.deq(i).valid := !empty(i) io.deq(i).bits := read_flit } } } } class InputUnit(cParam: ChannelParams, outParams: Seq[ChannelParams], egressParams: Seq[EgressChannelParams], combineRCVA: Boolean, combineSAST: Boolean ) (implicit p: Parameters) extends AbstractInputUnit(cParam, outParams, egressParams)(p) { val nVirtualChannels = cParam.nVirtualChannels val virtualChannelParams = cParam.virtualChannelParams class InputUnitIO extends AbstractInputUnitIO(cParam, outParams, egressParams) { val in = Flipped(new Channel(cParam.asInstanceOf[ChannelParams])) } val io = IO(new InputUnitIO) val g_i :: g_r :: g_v :: g_a :: g_c :: Nil = Enum(5) class InputState extends Bundle { val g = UInt(3.W) val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) 
}) val flow = new FlowRoutingBundle val fifo_deps = UInt(nVirtualChannels.W) } val input_buffer = Module(new InputBuffer(cParam)) for (i <- 0 until cParam.srcSpeedup) { input_buffer.io.enq(i) := io.in.flit(i) } input_buffer.io.deq.foreach(_.ready := false.B) val route_arbiter = Module(new Arbiter( new RouteComputerReq, nVirtualChannels )) io.router_req <> route_arbiter.io.out val states = Reg(Vec(nVirtualChannels, new InputState)) val anyFifo = cParam.possibleFlows.map(_.fifo).reduce(_||_) val allFifo = cParam.possibleFlows.map(_.fifo).reduce(_&&_) if (anyFifo) { val idle_mask = VecInit(states.map(_.g === g_i)).asUInt for (s <- states) for (i <- 0 until nVirtualChannels) s.fifo_deps := s.fifo_deps & ~idle_mask } for (i <- 0 until cParam.srcSpeedup) { when (io.in.flit(i).fire && io.in.flit(i).bits.head) { val id = io.in.flit(i).bits.virt_channel_id assert(id < nVirtualChannels.U) assert(states(id).g === g_i) val at_dest = io.in.flit(i).bits.flow.egress_node === nodeId.U states(id).g := Mux(at_dest, g_v, g_r) states(id).vc_sel.foreach(_.foreach(_ := false.B)) for (o <- 0 until nEgress) { when (o.U === io.in.flit(i).bits.flow.egress_node_id) { states(id).vc_sel(o+nOutputs)(0) := true.B } } states(id).flow := io.in.flit(i).bits.flow if (anyFifo) { val fifo = cParam.possibleFlows.filter(_.fifo).map(_.isFlow(io.in.flit(i).bits.flow)).toSeq.orR states(id).fifo_deps := VecInit(states.zipWithIndex.map { case (s, j) => s.g =/= g_i && s.flow.asUInt === io.in.flit(i).bits.flow.asUInt && j.U =/= id }).asUInt } } } (route_arbiter.io.in zip states).zipWithIndex.map { case ((i,s),idx) => if (virtualChannelParams(idx).traversable) { i.valid := s.g === g_r i.bits.flow := s.flow i.bits.src_virt_id := idx.U when (i.fire) { s.g := g_v } } else { i.valid := false.B i.bits := DontCare } } when (io.router_req.fire) { val id = io.router_req.bits.src_virt_id assert(states(id).g === g_r) states(id).g := g_v for (i <- 0 until nVirtualChannels) { when (i.U === id) { states(i).vc_sel := io.router_resp.vc_sel } } } val mask = RegInit(0.U(nVirtualChannels.W)) val vcalloc_reqs = Wire(Vec(nVirtualChannels, new VCAllocReq(cParam, outParams, egressParams))) val vcalloc_vals = Wire(Vec(nVirtualChannels, Bool())) val vcalloc_filter = PriorityEncoderOH(Cat(vcalloc_vals.asUInt, vcalloc_vals.asUInt & ~mask)) val vcalloc_sel = vcalloc_filter(nVirtualChannels-1,0) | (vcalloc_filter >> nVirtualChannels) // Prioritize incoming packetes when (io.router_req.fire) { mask := (1.U << io.router_req.bits.src_virt_id) - 1.U } .elsewhen (vcalloc_vals.orR) { mask := Mux1H(vcalloc_sel, (0 until nVirtualChannels).map { w => ~(0.U((w+1).W)) }) } io.vcalloc_req.valid := vcalloc_vals.orR io.vcalloc_req.bits := Mux1H(vcalloc_sel, vcalloc_reqs) states.zipWithIndex.map { case (s,idx) => if (virtualChannelParams(idx).traversable) { vcalloc_vals(idx) := s.g === g_v && s.fifo_deps === 0.U vcalloc_reqs(idx).in_vc := idx.U vcalloc_reqs(idx).vc_sel := s.vc_sel vcalloc_reqs(idx).flow := s.flow when (vcalloc_vals(idx) && vcalloc_sel(idx) && io.vcalloc_req.ready) { s.g := g_a } if (combineRCVA) { when (route_arbiter.io.in(idx).fire) { vcalloc_vals(idx) := true.B vcalloc_reqs(idx).vc_sel := io.router_resp.vc_sel } } } else { vcalloc_vals(idx) := false.B vcalloc_reqs(idx) := DontCare } } io.debug.va_stall := PopCount(vcalloc_vals) - io.vcalloc_req.ready when (io.vcalloc_req.fire) { for (i <- 0 until nVirtualChannels) { when (vcalloc_sel(i)) { states(i).vc_sel := io.vcalloc_resp.vc_sel states(i).g := g_a if (!combineRCVA) { assert(states(i).g === g_v) } } } } val 
salloc_arb = Module(new SwitchArbiter( nVirtualChannels, cParam.destSpeedup, outParams, egressParams )) (states zip salloc_arb.io.in).zipWithIndex.map { case ((s,r),i) => if (virtualChannelParams(i).traversable) { val credit_available = (s.vc_sel.asUInt & io.out_credit_available.asUInt) =/= 0.U r.valid := s.g === g_a && credit_available && input_buffer.io.deq(i).valid r.bits.vc_sel := s.vc_sel val deq_tail = input_buffer.io.deq(i).bits.tail r.bits.tail := deq_tail when (r.fire && deq_tail) { s.g := g_i } input_buffer.io.deq(i).ready := r.ready } else { r.valid := false.B r.bits := DontCare } } io.debug.sa_stall := PopCount(salloc_arb.io.in.map(r => r.valid && !r.ready)) io.salloc_req <> salloc_arb.io.out when (io.block) { salloc_arb.io.out.foreach(_.ready := false.B) io.salloc_req.foreach(_.valid := false.B) } class OutBundle extends Bundle { val valid = Bool() val vid = UInt(virtualChannelBits.W) val out_vid = UInt(log2Up(allOutParams.map(_.nVirtualChannels).max).W) val flit = new Flit(cParam.payloadBits) } val salloc_outs = if (combineSAST) { Wire(Vec(cParam.destSpeedup, new OutBundle)) } else { Reg(Vec(cParam.destSpeedup, new OutBundle)) } io.in.credit_return := salloc_arb.io.out.zipWithIndex.map { case (o, i) => Mux(o.fire, salloc_arb.io.chosen_oh(i), 0.U) }.reduce(_|_) io.in.vc_free := salloc_arb.io.out.zipWithIndex.map { case (o, i) => Mux(o.fire && Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail)), salloc_arb.io.chosen_oh(i), 0.U) }.reduce(_|_) for (i <- 0 until cParam.destSpeedup) { val salloc_out = salloc_outs(i) salloc_out.valid := salloc_arb.io.out(i).fire salloc_out.vid := OHToUInt(salloc_arb.io.chosen_oh(i)) val vc_sel = Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.vc_sel)) val channel_oh = vc_sel.map(_.reduce(_||_)).toSeq val virt_channel = Mux1H(channel_oh, vc_sel.map(v => OHToUInt(v)).toSeq) when (salloc_arb.io.out(i).fire) { salloc_out.out_vid := virt_channel salloc_out.flit.payload := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.payload)) salloc_out.flit.head := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.head)) salloc_out.flit.tail := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail)) salloc_out.flit.flow := Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.flow)) } .otherwise { salloc_out.out_vid := DontCare salloc_out.flit := DontCare } salloc_out.flit.virt_channel_id := DontCare // this gets set in the switch io.out(i).valid := salloc_out.valid io.out(i).bits.flit := salloc_out.flit io.out(i).bits.out_virt_channel := salloc_out.out_vid } def filterVCSel(sel: MixedVec[Vec[Bool]], srcV: Int) = { if (virtualChannelParams(srcV).traversable) { outParams.zipWithIndex.map { case (oP, oI) => (0 until oP.nVirtualChannels).map { oV => var allow = false virtualChannelParams(srcV).possibleFlows.foreach { pI => allow = allow || routingRelation( cParam.channelRoutingInfos(srcV), oP.channelRoutingInfos(oV), pI ) } if (!allow) sel(oI)(oV) := false.B } } } } (0 until nVirtualChannels).map { i => if (!virtualChannelParams(i).traversable) states(i) := DontCare filterVCSel(states(i).vc_sel, i) } when (reset.asBool) { states.foreach(_.g := g_i) } }
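One detail in InputUnit worth calling out before the Verilog: VC allocation picks a requester with a rotating-priority (round-robin) filter. The request vector is concatenated with a copy masked by the registered mask, PriorityEncoderOH is applied to the doubled vector, and the two halves are ORed back together, so requesters above the last grant win first and selection wraps around otherwise. Below is a minimal sketch of just that idiom; RoundRobinSelect, req and mask are illustrative stand-ins for vcalloc_filter/vcalloc_sel, vcalloc_vals and the mask register (the real code also updates mask on every grant).

import chisel3._
import chisel3.util._

// Illustrative only: the rotating-priority one-hot select used for vcalloc_sel.
object RoundRobinSelect {
  def apply(req: UInt, mask: UInt): UInt = {
    val n = req.getWidth
    // Give first priority to requests not covered by mask, then wrap to the rest.
    val filter = PriorityEncoderOH(Cat(req, req & ~mask))
    filter(n - 1, 0) | (filter >> n)
  }
}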
module InputUnit_32( // @[InputUnit.scala:158:7] input clock, // @[InputUnit.scala:158:7] input reset, // @[InputUnit.scala:158:7] output [1:0] io_router_req_bits_src_virt_id, // @[InputUnit.scala:170:14] output [1:0] io_router_req_bits_flow_vnet_id, // @[InputUnit.scala:170:14] output [3:0] io_router_req_bits_flow_ingress_node, // @[InputUnit.scala:170:14] output [2:0] io_router_req_bits_flow_ingress_node_id, // @[InputUnit.scala:170:14] output [3:0] io_router_req_bits_flow_egress_node, // @[InputUnit.scala:170:14] output [1:0] io_router_req_bits_flow_egress_node_id, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_3_0, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_3_1, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_3_2, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_2_0, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_2_1, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_2_2, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_1_0, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_1_1, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_1_2, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_0_0, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_0_1, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_0_2, // @[InputUnit.scala:170:14] input io_vcalloc_req_ready, // @[InputUnit.scala:170:14] output io_vcalloc_req_valid, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_5_0, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_4_0, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_3_0, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_3_1, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_3_2, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_2_0, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_2_1, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_2_2, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_1_0, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_1_1, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_1_2, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_0_0, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_0_1, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_0_2, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_5_0, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_4_0, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_3_0, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_2_0, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_0_0, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_0_1, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_0_2, // @[InputUnit.scala:170:14] input io_out_credit_available_5_0, // @[InputUnit.scala:170:14] input io_out_credit_available_4_0, // @[InputUnit.scala:170:14] input io_out_credit_available_3_0, // @[InputUnit.scala:170:14] input io_out_credit_available_2_0, // @[InputUnit.scala:170:14] input io_out_credit_available_1_0, // @[InputUnit.scala:170:14] input io_out_credit_available_1_1, // @[InputUnit.scala:170:14] input io_out_credit_available_1_2, // @[InputUnit.scala:170:14] input io_out_credit_available_0_0, // @[InputUnit.scala:170:14] input io_out_credit_available_0_1, // @[InputUnit.scala:170:14] input io_out_credit_available_0_2, // @[InputUnit.scala:170:14] input io_salloc_req_0_ready, // @[InputUnit.scala:170:14] 
output io_salloc_req_0_valid, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_5_0, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_4_0, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_3_0, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_3_1, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_3_2, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_2_0, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_2_1, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_2_2, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_0, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_1, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_2, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_0, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_1, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_2, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_tail, // @[InputUnit.scala:170:14] output io_out_0_valid, // @[InputUnit.scala:170:14] output io_out_0_bits_flit_head, // @[InputUnit.scala:170:14] output io_out_0_bits_flit_tail, // @[InputUnit.scala:170:14] output [144:0] io_out_0_bits_flit_payload, // @[InputUnit.scala:170:14] output [1:0] io_out_0_bits_flit_flow_vnet_id, // @[InputUnit.scala:170:14] output [3:0] io_out_0_bits_flit_flow_ingress_node, // @[InputUnit.scala:170:14] output [2:0] io_out_0_bits_flit_flow_ingress_node_id, // @[InputUnit.scala:170:14] output [3:0] io_out_0_bits_flit_flow_egress_node, // @[InputUnit.scala:170:14] output [1:0] io_out_0_bits_flit_flow_egress_node_id, // @[InputUnit.scala:170:14] output [1:0] io_out_0_bits_out_virt_channel, // @[InputUnit.scala:170:14] output [1:0] io_debug_va_stall, // @[InputUnit.scala:170:14] output [1:0] io_debug_sa_stall, // @[InputUnit.scala:170:14] input io_in_flit_0_valid, // @[InputUnit.scala:170:14] input io_in_flit_0_bits_head, // @[InputUnit.scala:170:14] input io_in_flit_0_bits_tail, // @[InputUnit.scala:170:14] input [144:0] io_in_flit_0_bits_payload, // @[InputUnit.scala:170:14] input [1:0] io_in_flit_0_bits_flow_vnet_id, // @[InputUnit.scala:170:14] input [3:0] io_in_flit_0_bits_flow_ingress_node, // @[InputUnit.scala:170:14] input [2:0] io_in_flit_0_bits_flow_ingress_node_id, // @[InputUnit.scala:170:14] input [3:0] io_in_flit_0_bits_flow_egress_node, // @[InputUnit.scala:170:14] input [1:0] io_in_flit_0_bits_flow_egress_node_id, // @[InputUnit.scala:170:14] input [1:0] io_in_flit_0_bits_virt_channel_id, // @[InputUnit.scala:170:14] output [2:0] io_in_credit_return, // @[InputUnit.scala:170:14] output [2:0] io_in_vc_free // @[InputUnit.scala:170:14] ); wire _GEN; // @[MixedVec.scala:116:9] wire _GEN_0; // @[MixedVec.scala:116:9] wire vcalloc_reqs_2_vc_sel_0_2; // @[MixedVec.scala:116:9] wire vcalloc_vals_2; // @[InputUnit.scala:266:25, :272:46, :273:29] wire _GEN_1; // @[MixedVec.scala:116:9] wire _GEN_2; // @[MixedVec.scala:116:9] wire vcalloc_reqs_1_vc_sel_0_1; // @[MixedVec.scala:116:9] wire vcalloc_vals_1; // @[InputUnit.scala:266:25, :272:46, :273:29] wire _GEN_3; // @[MixedVec.scala:116:9] wire _GEN_4; // @[MixedVec.scala:116:9] wire vcalloc_reqs_0_vc_sel_3_0; // @[MixedVec.scala:116:9] wire vcalloc_reqs_0_vc_sel_2_0; // @[MixedVec.scala:116:9] wire vcalloc_reqs_0_vc_sel_0_0; // @[MixedVec.scala:116:9] wire vcalloc_vals_0; // @[InputUnit.scala:266:25, :272:46, :273:29] wire _salloc_arb_io_in_0_ready; // 
@[InputUnit.scala:296:26] wire _salloc_arb_io_in_1_ready; // @[InputUnit.scala:296:26] wire _salloc_arb_io_in_2_ready; // @[InputUnit.scala:296:26] wire _salloc_arb_io_out_0_valid; // @[InputUnit.scala:296:26] wire [2:0] _salloc_arb_io_chosen_oh_0; // @[InputUnit.scala:296:26] wire _route_arbiter_io_in_1_ready; // @[InputUnit.scala:187:29] wire _route_arbiter_io_in_2_ready; // @[InputUnit.scala:187:29] wire _route_arbiter_io_out_valid; // @[InputUnit.scala:187:29] wire [1:0] _route_arbiter_io_out_bits_src_virt_id; // @[InputUnit.scala:187:29] wire _input_buffer_io_deq_0_valid; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_0_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_0_bits_tail; // @[InputUnit.scala:181:28] wire [144:0] _input_buffer_io_deq_0_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_1_valid; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_1_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_1_bits_tail; // @[InputUnit.scala:181:28] wire [144:0] _input_buffer_io_deq_1_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_2_valid; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_2_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_2_bits_tail; // @[InputUnit.scala:181:28] wire [144:0] _input_buffer_io_deq_2_bits_payload; // @[InputUnit.scala:181:28] reg [2:0] states_0_g; // @[InputUnit.scala:192:19] reg states_0_vc_sel_5_0; // @[InputUnit.scala:192:19] reg states_0_vc_sel_4_0; // @[InputUnit.scala:192:19] reg states_0_vc_sel_3_0; // @[InputUnit.scala:192:19] reg states_0_vc_sel_2_0; // @[InputUnit.scala:192:19] reg states_0_vc_sel_0_0; // @[InputUnit.scala:192:19] reg [1:0] states_0_flow_vnet_id; // @[InputUnit.scala:192:19] reg [3:0] states_0_flow_ingress_node; // @[InputUnit.scala:192:19] reg [2:0] states_0_flow_ingress_node_id; // @[InputUnit.scala:192:19] reg [3:0] states_0_flow_egress_node; // @[InputUnit.scala:192:19] reg [1:0] states_0_flow_egress_node_id; // @[InputUnit.scala:192:19] reg [2:0] states_1_g; // @[InputUnit.scala:192:19] reg states_1_vc_sel_5_0; // @[InputUnit.scala:192:19] reg states_1_vc_sel_4_0; // @[InputUnit.scala:192:19] reg states_1_vc_sel_0_1; // @[InputUnit.scala:192:19] reg [1:0] states_1_flow_vnet_id; // @[InputUnit.scala:192:19] reg [3:0] states_1_flow_ingress_node; // @[InputUnit.scala:192:19] reg [2:0] states_1_flow_ingress_node_id; // @[InputUnit.scala:192:19] reg [3:0] states_1_flow_egress_node; // @[InputUnit.scala:192:19] reg [1:0] states_1_flow_egress_node_id; // @[InputUnit.scala:192:19] reg [2:0] states_2_g; // @[InputUnit.scala:192:19] reg states_2_vc_sel_5_0; // @[InputUnit.scala:192:19] reg states_2_vc_sel_4_0; // @[InputUnit.scala:192:19] reg states_2_vc_sel_0_2; // @[InputUnit.scala:192:19] reg [1:0] states_2_flow_vnet_id; // @[InputUnit.scala:192:19] reg [3:0] states_2_flow_ingress_node; // @[InputUnit.scala:192:19] reg [2:0] states_2_flow_ingress_node_id; // @[InputUnit.scala:192:19] reg [3:0] states_2_flow_egress_node; // @[InputUnit.scala:192:19] reg [1:0] states_2_flow_egress_node_id; // @[InputUnit.scala:192:19] wire _GEN_5 = io_in_flit_0_valid & io_in_flit_0_bits_head; // @[InputUnit.scala:205:30] wire route_arbiter_io_in_0_valid = states_0_g == 3'h1; // @[InputUnit.scala:158:7, :192:19, :229:22] wire route_arbiter_io_in_1_valid = states_1_g == 3'h1; // @[InputUnit.scala:158:7, :192:19, :229:22] wire _GEN_6 = _route_arbiter_io_in_1_ready & route_arbiter_io_in_1_valid; // @[Decoupled.scala:51:35] wire 
route_arbiter_io_in_2_valid = states_2_g == 3'h1; // @[InputUnit.scala:158:7, :192:19, :229:22] wire _GEN_7 = _route_arbiter_io_in_2_ready & route_arbiter_io_in_2_valid; // @[Decoupled.scala:51:35]
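The literal state comparisons in the Verilog above (for example states_0_g == 3'h1 for "ready to route") follow directly from the Enum(5) in the Chisel source: chisel3.util.Enum assigns consecutive encodings starting at zero, so with five states the field is 3 bits wide and g_i, g_r, g_v, g_a, g_c elaborate to 0 through 4. A tiny sketch of that correspondence; StateEnumSketch is a made-up name used only for illustration.

import chisel3._
import chisel3.util._

// Illustrative only: why g_r shows up as 3'h1 in the emitted comparisons.
class StateEnumSketch extends Module {
  val io = IO(new Bundle { val isRouting = Output(Bool()) })
  val g_i :: g_r :: g_v :: g_a :: g_c :: Nil = Enum(5)
  val g = RegInit(g_i)        // idle after reset, like states(i).g in InputUnit
  io.isRouting := g === g_r   // lowers to a comparison against 3'h1
}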
Generate the Verilog code corresponding to the following Chisel files. File FIFOFixer.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.lazymodule._ import org.chipsalliance.diplomacy.nodes._ import freechips.rocketchip.diplomacy.RegionType import freechips.rocketchip.util.property class TLFIFOFixer(policy: TLFIFOFixer.Policy = TLFIFOFixer.all)(implicit p: Parameters) extends LazyModule { private def fifoMap(seq: Seq[TLSlaveParameters]) = { val (flatManagers, keepManagers) = seq.partition(policy) // We need to be careful if one flatManager and one keepManager share an existing domain // Erring on the side of caution, we will also flatten the keepManager in this case val flatDomains = Set(flatManagers.flatMap(_.fifoId):_*) // => ID 0 val keepDomains = Set(keepManagers.flatMap(_.fifoId):_*) -- flatDomains // => IDs compacted // Calculate what the FIFO domains look like after the fixer is applied val flatMap = flatDomains.map { x => (x, 0) }.toMap val keepMap = keepDomains.scanLeft((-1,0)) { case ((_,s),x) => (x, s+1) }.toMap val map = flatMap ++ keepMap val fixMap = seq.map { m => m.fifoId match { case None => if (policy(m)) Some(0) else None case Some(id) => Some(map(id)) // also flattens some who did not ask } } // Compress the FIFO domain space of those we are combining val reMap = flatDomains.scanLeft((-1,-1)) { case ((_,s),x) => (x, s+1) }.toMap val splatMap = seq.map { m => m.fifoId match { case None => None case Some(id) => reMap.lift(id) } } (fixMap, splatMap) } val node = new AdapterNode(TLImp)( { cp => cp }, { mp => val (fixMap, _) = fifoMap(mp.managers) mp.v1copy(managers = (fixMap zip mp.managers) map { case (id, m) => m.v1copy(fifoId = id) }) }) with TLFormatNode { override def circuitIdentity = edges.in.map(_.client.clients.filter(c => c.requestFifo && c.sourceId.size > 1).size).sum == 0 } lazy val module = new Impl class Impl extends LazyModuleImp(this) { (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => val (fixMap, splatMap) = fifoMap(edgeOut.manager.managers) // Do we need to serialize the request to this manager? 
val a_notFIFO = edgeIn.manager.fastProperty(in.a.bits.address, _.fifoId != Some(0), (b:Boolean) => b.B) // Compact the IDs of the cases we serialize val compacted = ((fixMap zip splatMap) zip edgeOut.manager.managers) flatMap { case ((f, s), m) => if (f == Some(0)) Some(m.v1copy(fifoId = s)) else None } val sinks = if (compacted.exists(_.supportsAcquireB)) edgeOut.manager.endSinkId else 0 val a_id = if (compacted.isEmpty) 0.U else edgeOut.manager.v1copy(managers = compacted, endSinkId = sinks).findFifoIdFast(in.a.bits.address) val a_noDomain = a_id === 0.U if (false) { println(s"FIFOFixer for: ${edgeIn.client.clients.map(_.name).mkString(", ")}") println(s"make FIFO: ${edgeIn.manager.managers.filter(_.fifoId==Some(0)).map(_.name).mkString(", ")}") println(s"not FIFO: ${edgeIn.manager.managers.filter(_.fifoId!=Some(0)).map(_.name).mkString(", ")}") println(s"domains: ${compacted.groupBy(_.name).mapValues(_.map(_.fifoId))}") println("") } // Count beats val a_first = edgeIn.first(in.a) val d_first = edgeOut.first(out.d) && out.d.bits.opcode =/= TLMessages.ReleaseAck // Keep one bit for each source recording if there is an outstanding request that must be made FIFO // Sources unused in the stall signal calculation should be pruned by DCE val flight = RegInit(VecInit(Seq.fill(edgeIn.client.endSourceId) { false.B })) when (a_first && in.a.fire) { flight(in.a.bits.source) := !a_notFIFO } when (d_first && in.d.fire) { flight(in.d.bits.source) := false.B } val stalls = edgeIn.client.clients.filter(c => c.requestFifo && c.sourceId.size > 1).map { c => val a_sel = c.sourceId.contains(in.a.bits.source) val id = RegEnable(a_id, in.a.fire && a_sel && !a_notFIFO) val track = flight.slice(c.sourceId.start, c.sourceId.end) a_sel && a_first && track.reduce(_ || _) && (a_noDomain || id =/= a_id) } val stall = stalls.foldLeft(false.B)(_||_) out.a <> in.a in.d <> out.d out.a.valid := in.a.valid && (a_notFIFO || !stall) in.a.ready := out.a.ready && (a_notFIFO || !stall) if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) { in .b <> out.b out.c <> in .c out.e <> in .e } else { in.b.valid := false.B in.c.ready := true.B in.e.ready := true.B out.b.ready := true.B out.c.valid := false.B out.e.valid := false.B } //Functional cover properties property.cover(in.a.valid && stall, "COVER FIFOFIXER STALL", "Cover: Stall occured for a valid transaction") val SourceIdFIFOed = RegInit(0.U(edgeIn.client.endSourceId.W)) val SourceIdSet = WireDefault(0.U(edgeIn.client.endSourceId.W)) val SourceIdClear = WireDefault(0.U(edgeIn.client.endSourceId.W)) when (a_first && in.a.fire && !a_notFIFO) { SourceIdSet := UIntToOH(in.a.bits.source) } when (d_first && in.d.fire) { SourceIdClear := UIntToOH(in.d.bits.source) } SourceIdFIFOed := SourceIdFIFOed | SourceIdSet val allIDs_FIFOed = SourceIdFIFOed===Fill(SourceIdFIFOed.getWidth, 1.U) property.cover(allIDs_FIFOed, "COVER all sources", "Cover: FIFOFIXER covers all Source IDs") //property.cover(flight.reduce(_ && _), "COVER full", "Cover: FIFO is full with all Source IDs") property.cover(!(flight.reduce(_ || _)), "COVER empty", "Cover: FIFO is empty") property.cover(SourceIdSet > 0.U, "COVER at least one push", "Cover: At least one Source ID is pushed") property.cover(SourceIdClear > 0.U, "COVER at least one pop", "Cover: At least one Source ID is popped") } } } object TLFIFOFixer { // Which slaves should have their FIFOness combined? 
// NOTE: this transformation is still only applied for masters with requestFifo type Policy = TLSlaveParameters => Boolean import RegionType._ val all: Policy = m => true val allFIFO: Policy = m => m.fifoId.isDefined val allVolatile: Policy = m => m.regionType <= VOLATILE def apply(policy: Policy = all)(implicit p: Parameters): TLNode = { val fixer = LazyModule(new TLFIFOFixer(policy)) fixer.node } } File Buffer.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.diplomacy.BufferParams class TLBufferNode ( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams)(implicit valName: ValName) extends TLAdapterNode( clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) }, managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) } ) { override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}" override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none) } class TLBuffer( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams)(implicit p: Parameters) extends LazyModule { def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace) def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde) def this()(implicit p: Parameters) = this(BufferParams.default) val node = new TLBufferNode(a, b, c, d, e) lazy val module = new Impl class Impl extends LazyModuleImp(this) { def headBundle = node.out.head._2.bundle override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_") (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out.a <> a(in .a) in .d <> d(out.d) if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) { in .b <> b(out.b) out.c <> c(in .c) out.e <> e(in .e) } else { in.b.valid := false.B in.c.ready := true.B in.e.ready := true.B out.b.ready := true.B out.c.valid := false.B out.e.valid := false.B } } } } object TLBuffer { def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default) def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde) def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace) def apply( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams)(implicit p: Parameters): TLNode = { val buffer = LazyModule(new TLBuffer(a, b, c, d, e)) buffer.node } def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = { val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) } name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } } buffers.map(_.node) } def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = { chain(depth, name) .reduceLeftOption(_ :*=* _) .getOrElse(TLNameNode("no_buffer")) } } File package.scala: // See LICENSE.SiFive for license details. 
package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. 
*/ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class 
OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File MemoryBus.scala: // See LICENSE.SiFive for license details. 
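// Illustrative parameterization (example values, not taken from this file): a single
// memory channel with a 64-bit data path and 64-byte cache blocks could be described
// as MemoryBusParams(beatBytes = 8, blockBytes = 64); the optional fields
// (dtsFrequency, zeroDevice, errorDevice, replication) all default to None.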
package freechips.rocketchip.subsystem import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.devices.tilelink.{BuiltInDevices, HasBuiltInDeviceParams, BuiltInErrorDeviceParams, BuiltInZeroDeviceParams} import freechips.rocketchip.tilelink.{ ReplicatedRegion, HasTLBusParams, HasRegionReplicatorParams, TLBusWrapper, TLBusWrapperInstantiationLike, RegionReplicator, TLXbar, TLInwardNode, TLOutwardNode, ProbePicker, TLEdge, TLFIFOFixer } import freechips.rocketchip.util.Location /** Parameterization of the memory-side bus created for each memory channel */ case class MemoryBusParams( beatBytes: Int, blockBytes: Int, dtsFrequency: Option[BigInt] = None, zeroDevice: Option[BuiltInZeroDeviceParams] = None, errorDevice: Option[BuiltInErrorDeviceParams] = None, replication: Option[ReplicatedRegion] = None) extends HasTLBusParams with HasBuiltInDeviceParams with HasRegionReplicatorParams with TLBusWrapperInstantiationLike { def instantiate(context: HasTileLinkLocations, loc: Location[TLBusWrapper])(implicit p: Parameters): MemoryBus = { val mbus = LazyModule(new MemoryBus(this, loc.name)) mbus.suggestName(loc.name) context.tlBusWrapperLocationMap += (loc -> mbus) mbus } } /** Wrapper for creating TL nodes from a bus connected to the back of each mem channel */ class MemoryBus(params: MemoryBusParams, name: String = "memory_bus")(implicit p: Parameters) extends TLBusWrapper(params, name)(p) { private val replicator = params.replication.map(r => LazyModule(new RegionReplicator(r))) val prefixNode = replicator.map { r => r.prefix := addressPrefixNexusNode addressPrefixNexusNode } private val xbar = LazyModule(new TLXbar(nameSuffix = Some(name))).suggestName(busName + "_xbar") val inwardNode: TLInwardNode = replicator.map(xbar.node :*=* TLFIFOFixer(TLFIFOFixer.all) :*=* _.node) .getOrElse(xbar.node :*=* TLFIFOFixer(TLFIFOFixer.all)) val outwardNode: TLOutwardNode = ProbePicker() :*= xbar.node def busView: TLEdge = xbar.node.edges.in.head val builtInDevices: BuiltInDevices = BuiltInDevices.attach(params, outwardNode) } File ClockDomain.scala: package freechips.rocketchip.prci import chisel3._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ abstract class Domain(implicit p: Parameters) extends LazyModule with HasDomainCrossing { def clockBundle: ClockBundle lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { childClock := clockBundle.clock childReset := clockBundle.reset override def provideImplicitClockToLazyChildren = true // these are just for backwards compatibility with external devices // that were manually wiring themselves to the domain's clock/reset input: val clock = IO(Output(chiselTypeOf(clockBundle.clock))) val reset = IO(Output(chiselTypeOf(clockBundle.reset))) clock := clockBundle.clock reset := clockBundle.reset } } abstract class ClockDomain(implicit p: Parameters) extends Domain with HasClockDomainCrossing class ClockSinkDomain(val clockSinkParams: ClockSinkParameters)(implicit p: Parameters) extends ClockDomain { def this(take: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSinkParameters(take = take, name = name)) val clockNode = ClockSinkNode(Seq(clockSinkParams)) def clockBundle = clockNode.in.head._1 override lazy val desiredName = (clockSinkParams.name.toSeq :+ "ClockSinkDomain").mkString } class ClockSourceDomain(val clockSourceParams: ClockSourceParameters)(implicit p: Parameters) extends ClockDomain { def this(give: 
Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSourceParameters(give = give, name = name)) val clockNode = ClockSourceNode(Seq(clockSourceParams)) def clockBundle = clockNode.out.head._1 override lazy val desiredName = (clockSourceParams.name.toSeq :+ "ClockSourceDomain").mkString } abstract class ResetDomain(implicit p: Parameters) extends Domain with HasResetDomainCrossing File ClockGroup.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.prci import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.lazymodule._ import org.chipsalliance.diplomacy.nodes._ import freechips.rocketchip.resources.FixedClockResource case class ClockGroupingNode(groupName: String)(implicit valName: ValName) extends MixedNexusNode(ClockGroupImp, ClockImp)( dFn = { _ => ClockSourceParameters() }, uFn = { seq => ClockGroupSinkParameters(name = groupName, members = seq) }) { override def circuitIdentity = outputs.size == 1 } class ClockGroup(groupName: String)(implicit p: Parameters) extends LazyModule { val node = ClockGroupingNode(groupName) lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { val (in, _) = node.in(0) val (out, _) = node.out.unzip require (node.in.size == 1) require (in.member.size == out.size) (in.member.data zip out) foreach { case (i, o) => o := i } } } object ClockGroup { def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new ClockGroup(valName.name)).node } case class ClockGroupAggregateNode(groupName: String)(implicit valName: ValName) extends NexusNode(ClockGroupImp)( dFn = { _ => ClockGroupSourceParameters() }, uFn = { seq => ClockGroupSinkParameters(name = groupName, members = seq.flatMap(_.members))}) { override def circuitIdentity = outputs.size == 1 } class ClockGroupAggregator(groupName: String)(implicit p: Parameters) extends LazyModule { val node = ClockGroupAggregateNode(groupName) override lazy val desiredName = s"ClockGroupAggregator_$groupName" lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { val (in, _) = node.in.unzip val (out, _) = node.out.unzip val outputs = out.flatMap(_.member.data) require (node.in.size == 1, s"Aggregator for groupName: ${groupName} had ${node.in.size} inward edges instead of 1") require (in.head.member.size == outputs.size) in.head.member.data.zip(outputs).foreach { case (i, o) => o := i } } } object ClockGroupAggregator { def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new ClockGroupAggregator(valName.name)).node } class SimpleClockGroupSource(numSources: Int = 1)(implicit p: Parameters) extends LazyModule { val node = ClockGroupSourceNode(List.fill(numSources) { ClockGroupSourceParameters() }) lazy val module = new Impl class Impl extends LazyModuleImp(this) { val (out, _) = node.out.unzip out.map { out: ClockGroupBundle => out.member.data.foreach { o => o.clock := clock; o.reset := reset } } } } object SimpleClockGroupSource { def apply(num: Int = 1)(implicit p: Parameters, valName: ValName) = LazyModule(new SimpleClockGroupSource(num)).node } case class FixedClockBroadcastNode(fixedClockOpt: Option[ClockParameters])(implicit valName: ValName) extends NexusNode(ClockImp)( dFn = { seq => fixedClockOpt.map(_ => ClockSourceParameters(give = fixedClockOpt)).orElse(seq.headOption).getOrElse(ClockSourceParameters()) }, uFn = { seq => fixedClockOpt.map(_ => ClockSinkParameters(take = fixedClockOpt)).orElse(seq.headOption).getOrElse(ClockSinkParameters()) 
}, inputRequiresOutput = false) { def fixedClockResources(name: String, prefix: String = "soc/"): Seq[Option[FixedClockResource]] = Seq(fixedClockOpt.map(t => new FixedClockResource(name, t.freqMHz, prefix))) } class FixedClockBroadcast(fixedClockOpt: Option[ClockParameters])(implicit p: Parameters) extends LazyModule { val node = new FixedClockBroadcastNode(fixedClockOpt) { override def circuitIdentity = outputs.size == 1 } lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { val (in, _) = node.in(0) val (out, _) = node.out.unzip override def desiredName = s"FixedClockBroadcast_${out.size}" require (node.in.size == 1, "FixedClockBroadcast can only broadcast a single clock") out.foreach { _ := in } } } object FixedClockBroadcast { def apply(fixedClockOpt: Option[ClockParameters] = None)(implicit p: Parameters, valName: ValName) = LazyModule(new FixedClockBroadcast(fixedClockOpt)).node } case class PRCIClockGroupNode()(implicit valName: ValName) extends NexusNode(ClockGroupImp)( dFn = { _ => ClockGroupSourceParameters() }, uFn = { _ => ClockGroupSinkParameters("prci", Nil) }, outputRequiresInput = false) File Bundles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import freechips.rocketchip.util._ import scala.collection.immutable.ListMap import chisel3.util.Decoupled import chisel3.util.DecoupledIO import chisel3.reflect.DataMirror abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle // common combos in lazy policy: // Put + Acquire // Release + AccessAck object TLMessages { // A B C D E def PutFullData = 0.U // . . => AccessAck def PutPartialData = 1.U // . . => AccessAck def ArithmeticData = 2.U // . . => AccessAckData def LogicalData = 3.U // . . => AccessAckData def Get = 4.U // . . => AccessAckData def Hint = 5.U // . . => HintAck def AcquireBlock = 6.U // . => Grant[Data] def AcquirePerm = 7.U // . => Grant[Data] def Probe = 6.U // . => ProbeAck[Data] def AccessAck = 0.U // . . def AccessAckData = 1.U // . . def HintAck = 2.U // . . def ProbeAck = 4.U // . def ProbeAckData = 5.U // . def Release = 6.U // . => ReleaseAck def ReleaseData = 7.U // . => ReleaseAck def Grant = 4.U // . => GrantAck def GrantData = 5.U // . => GrantAck def ReleaseAck = 6.U // . def GrantAck = 0.U // . 
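// Illustrative pairing of these encodings: a Get is opcode 4 on channel A and is
// answered by AccessAckData (opcode 1) on channel D, which matches the entry at
// index Get in adResponse below, so a simple manager can derive its response
// opcode with something like
//   d.bits.opcode := TLMessages.adResponse(a.bits.opcode)
// (real managers additionally handle denied/corrupt and the Acquire/Grant/GrantAck handshake).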
def isA(x: UInt) = x <= AcquirePerm def isB(x: UInt) = x <= Probe def isC(x: UInt) = x <= ReleaseData def isD(x: UInt) = x <= ReleaseAck def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant) def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck) def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("AcquireBlock",TLPermissions.PermMsgGrow), ("AcquirePerm",TLPermissions.PermMsgGrow)) def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("Probe",TLPermissions.PermMsgCap)) def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("ProbeAck",TLPermissions.PermMsgReport), ("ProbeAckData",TLPermissions.PermMsgReport), ("Release",TLPermissions.PermMsgReport), ("ReleaseData",TLPermissions.PermMsgReport)) def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("Grant",TLPermissions.PermMsgCap), ("GrantData",TLPermissions.PermMsgCap), ("ReleaseAck",TLPermissions.PermMsgReserved)) } /** * The three primary TileLink permissions are: * (T)runk: the agent is (or is on inwards path to) the global point of serialization. * (B)ranch: the agent is on an outwards path to * (N)one: * These permissions are permuted by transfer operations in various ways. * Operations can cap permissions, request for them to be grown or shrunk, * or for a report on their current status. 
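 * For example, an AcquireBlock carrying the grow permission NtoT asks to move from
 * None to Trunk and is typically answered by a Grant capped toT, while a Release
 * carrying the shrink permission TtoB hands Trunk back and keeps only a Branch copy.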
*/ object TLPermissions { val aWidth = 2 val bdWidth = 2 val cWidth = 3 // Cap types (Grant = new permissions, Probe = permisions <= target) def toT = 0.U(bdWidth.W) def toB = 1.U(bdWidth.W) def toN = 2.U(bdWidth.W) def isCap(x: UInt) = x <= toN // Grow types (Acquire = permissions >= target) def NtoB = 0.U(aWidth.W) def NtoT = 1.U(aWidth.W) def BtoT = 2.U(aWidth.W) def isGrow(x: UInt) = x <= BtoT // Shrink types (ProbeAck, Release) def TtoB = 0.U(cWidth.W) def TtoN = 1.U(cWidth.W) def BtoN = 2.U(cWidth.W) def isShrink(x: UInt) = x <= BtoN // Report types (ProbeAck, Release) def TtoT = 3.U(cWidth.W) def BtoB = 4.U(cWidth.W) def NtoN = 5.U(cWidth.W) def isReport(x: UInt) = x <= NtoN def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT") def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN") def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TotT", "Report BtoB", "Report NtoN") def PermMsgReserved:Seq[String] = Seq("Reserved") } object TLAtomics { val width = 3 // Arithmetic types def MIN = 0.U(width.W) def MAX = 1.U(width.W) def MINU = 2.U(width.W) def MAXU = 3.U(width.W) def ADD = 4.U(width.W) def isArithmetic(x: UInt) = x <= ADD // Logical types def XOR = 0.U(width.W) def OR = 1.U(width.W) def AND = 2.U(width.W) def SWAP = 3.U(width.W) def isLogical(x: UInt) = x <= SWAP def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD") def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP") } object TLHints { val width = 1 def PREFETCH_READ = 0.U(width.W) def PREFETCH_WRITE = 1.U(width.W) def isHints(x: UInt) = x <= PREFETCH_WRITE def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite") } sealed trait TLChannel extends TLBundleBase { val channelName: String } sealed trait TLDataChannel extends TLChannel sealed trait TLAddrChannel extends TLDataChannel final class TLBundleA(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleA_${params.shortName}" val channelName = "'A' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleB(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleB_${params.shortName}" val channelName = "'B' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val address = UInt(params.addressBits.W) // from // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleC(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleC_${params.shortName}" val channelName = "'C' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.cWidth.W) // shrink or report perms val size = UInt(params.sizeBits.W) 
val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleD(params: TLBundleParameters) extends TLBundleBase(params) with TLDataChannel { override def typeName = s"TLBundleD_${params.shortName}" val channelName = "'D' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val sink = UInt(params.sinkBits.W) // from val denied = Bool() // implies corrupt iff *Data val user = BundleMap(params.responseFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleE(params: TLBundleParameters) extends TLBundleBase(params) with TLChannel { override def typeName = s"TLBundleE_${params.shortName}" val channelName = "'E' channel" val sink = UInt(params.sinkBits.W) // to } class TLBundle(val params: TLBundleParameters) extends Record { // Emulate a Bundle with elements abcde or ad depending on params.hasBCE private val optA = Some (Decoupled(new TLBundleA(params))) private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params)))) private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params))) private val optD = Some (Flipped(Decoupled(new TLBundleD(params)))) private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params))) def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params))))) def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params))))) def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params))))) def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params))))) def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params))))) val elements = if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a) else ListMap("d" -> d, "a" -> a) def tieoff(): Unit = { DataMirror.specifiedDirectionOf(a.ready) match { case SpecifiedDirection.Input => a.ready := false.B c.ready := false.B e.ready := false.B b.valid := false.B d.valid := false.B case SpecifiedDirection.Output => a.valid := false.B c.valid := false.B e.valid := false.B b.ready := false.B d.ready := false.B case _ => } } } object TLBundle { def apply(params: TLBundleParameters) = new TLBundle(params) } class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params) { val a = new AsyncBundle(new TLBundleA(params.base), params.async) val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async)) val c = new AsyncBundle(new TLBundleC(params.base), params.async) val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async)) val e = new AsyncBundle(new TLBundleE(params.base), params.async) } class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = RationalIO(new TLBundleA(params)) val b = Flipped(RationalIO(new TLBundleB(params))) val c = RationalIO(new TLBundleC(params)) val d = 
Flipped(RationalIO(new TLBundleD(params))) val e = RationalIO(new TLBundleE(params)) } class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = CreditedIO(new TLBundleA(params)) val b = Flipped(CreditedIO(new TLBundleB(params))) val c = CreditedIO(new TLBundleC(params)) val d = Flipped(CreditedIO(new TLBundleD(params))) val e = CreditedIO(new TLBundleE(params)) } File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. 
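// In outline: cloned children have the fields of their recorded AutoBundle rebound
// onto fresh Dangles, ordinary children are instantiated with Module() and contribute
// their own dangles, and this wrapper's BaseNodes add theirs; dangles that pair up by
// source HalfEdge are connected directly, and any that remain are punched out as this
// module's `auto` IO and handed up to the parent LazyModule.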
val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. 
*/ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate 
these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // An potentially empty inclusive range of 2-powers [min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max, s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask are the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f decribes a device managing 0x1000-0x100f, 0x1100-0x110f, ... 
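// Worked example of the contains() arithmetic for the first case above:
//   AddressSet(0x200, 0xff).contains(0x2a4) = ((0x2a4 ^ 0x200) & ~0xff) = (0xa4 & ~0xff) = 0 => true
//   AddressSet(0x200, 0xff).contains(0x300) = ((0x300 ^ 0x200) & ~0xff) = 0x100 => nonzero, false
// and its alignment, (mask + 1) & ~mask = 0x100, shows the region is 256-byte aligned.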
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File MixedNode.scala: package org.chipsalliance.diplomacy.nodes import chisel3.{Data, DontCare, Wire} import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.{Field, Parameters} import org.chipsalliance.diplomacy.ValName import org.chipsalliance.diplomacy.sourceLine /** One side metadata of a [[Dangle]]. * * Describes one side of an edge going into or out of a [[BaseNode]]. * * @param serial * the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to. * @param index * the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to. */ case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] { import scala.math.Ordered.orderingToOrdered def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that)) } /** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]] * connects. 
* * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] , * [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]]. * * @param source * the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within * that [[BaseNode]]. * @param sink * sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that * [[BaseNode]]. * @param flipped * flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to * `danglesIn`. * @param dataOpt * actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module */ case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) { def data = dataOpt.get } /** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often * derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually * implement the protocol. */ case class Edges[EI, EO](in: Seq[EI], out: Seq[EO]) /** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */ case object MonitorsEnabled extends Field[Boolean](true) /** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented. * * For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but * [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink * nodes, flipping the rendering of one node's edge will usual produce a more concise visual layout for the * [[LazyModule]]. */ case object RenderFlipped extends Field[Boolean](false) /** The sealed node class in the package, all node are derived from it. * * @param inner * Sink interface implementation. * @param outer * Source interface implementation. * @param valName * val name of this node. * @tparam DI * Downward-flowing parameters received on the inner side of the node. It is usually a brunch of parameters * describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected * [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port * parameters. * @tparam UI * Upward-flowing parameters generated by the inner side of the node. It is usually a brunch of parameters describing * the protocol parameters of a sink. For an [[InwardNode]], it is determined itself. * @tparam EI * Edge Parameters describing a connection on the inner side of the node. It is usually a brunch of transfers * specified for a sink according to protocol. * @tparam BI * Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface. * It should extends from [[chisel3.Data]], which represents the real hardware. * @tparam DO * Downward-flowing parameters generated on the outer side of the node. It is usually a brunch of parameters * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined itself. * @tparam UO * Upward-flowing parameters received by the outer side of the node. It is usually a brunch of parameters describing * the protocol parameters from a sink. 
For an [[OutwardNode]], it is determined by the connected [[InwardNode]]. * Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters. * @tparam EO * Edge Parameters describing a connection on the outer side of the node. It is usually a brunch of transfers * specified for a source according to protocol. * @tparam BO * Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source * interface. It should extends from [[chisel3.Data]], which represents the real hardware. * * @note * Call Graph of [[MixedNode]] * - line `─`: source is process by a function and generate pass to others * - Arrow `→`: target of arrow is generated by source * * {{{ * (from the other node) * ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐ * ↓ │ * (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │ * [[InwardNode.accPI]] │ │ │ * │ │ (based on protocol) │ * │ │ [[MixedNode.inner.edgeI]] │ * │ │ ↓ │ * ↓ │ │ │ * (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │ * [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │ * │ │ ↑ │ │ │ * │ │ │ [[OutwardNode.doParams]] │ │ * │ │ │ (from the other node) │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * │ │ │ └────────┬──────────────┤ │ * │ │ │ │ │ │ * │ │ │ │ (based on protocol) │ * │ │ │ │ [[MixedNode.inner.edgeI]] │ * │ │ │ │ │ │ * │ │ (from the other node) │ ↓ │ * │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │ * │ ↑ ↑ │ │ ↓ │ * │ │ │ │ │ [[MixedNode.in]] │ * │ │ │ │ ↓ ↑ │ * │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │ * ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │ * │ │ │ [[MixedNode.bundleOut]]─┐ │ │ * │ │ │ ↑ ↓ │ │ * │ │ │ │ [[MixedNode.out]] │ │ * │ ↓ ↓ │ ↑ │ │ * │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │ * │ │ (from the other node) ↑ │ │ * │ │ │ │ │ │ * │ │ │ [[MixedNode.outer.edgeO]] │ │ * │ │ │ (based on protocol) │ │ * │ │ │ │ │ │ * │ │ │ ┌────────────────────────────────────────┤ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * (immobilize after elaboration)│ ↓ │ │ │ │ * [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │ * ↑ (inward port from [[OutwardNode]]) │ │ │ │ * │ ┌─────────────────────────────────────────┤ │ │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * [[OutwardNode.accPO]] │ ↓ │ │ │ * (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │ * │ ↑ │ │ * │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │ * └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘ * }}} */ abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data]( val inner: InwardNodeImp[DI, UI, EI, BI], val outer: OutwardNodeImp[DO, UO, EO, BO] )( implicit valName: ValName) extends BaseNode with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO] with InwardNode[DI, UI, BI] with OutwardNode[DO, UO, BO] { // Generate a [[NodeHandle]] with inward and outward node are both this node. val inward = this val outward = this /** Debug info of nodes binding. 
*/ def bindingInfo: String = s"""$iBindingInfo |$oBindingInfo |""".stripMargin /** Debug info of ports connecting. */ def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}] |${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}] |""".stripMargin /** Debug info of parameters propagations. */ def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}] |${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}] |${diParams.size} downstream inward parameters: [${diParams.mkString(",")}] |${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}] |""".stripMargin /** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and * [[MixedNode.iPortMapping]]. * * Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward * stars and outward stars. * * This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type * of node. * * @param iKnown * Number of known-size ([[BIND_ONCE]]) input bindings. * @param oKnown * Number of known-size ([[BIND_ONCE]]) output bindings. * @param iStar * Number of unknown size ([[BIND_STAR]]) input bindings. * @param oStar * Number of unknown size ([[BIND_STAR]]) output bindings. * @return * A Tuple of the resolved number of input and output connections. */ protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) /** Function to generate downward-flowing outward params from the downward-flowing input params and the current output * ports. * * @param n * The size of the output sequence to generate. * @param p * Sequence of downward-flowing input parameters of this node. * @return * A `n`-sized sequence of downward-flowing output edge parameters. */ protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]]. * * @param n * Size of the output sequence. * @param p * Upward-flowing output edge parameters. * @return * A n-sized sequence of upward-flowing input edge parameters. */ protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] /** @return * The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with * [[BIND_STAR]]. */ protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR) /** @return * The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of * output bindings bound with [[BIND_STAR]]. */ protected[diplomacy] lazy val sourceCard: Int = iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR) /** @return list of nodes involved in flex bindings with this node. */ protected[diplomacy] lazy val flexes: Seq[BaseNode] = oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2) /** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin * greedily taking up the remaining connections. * * @return * A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return * value is not relevant. 
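 * For example (sketch): in a chain `a :*=* b :*= c`, the `:*=` contributes sink
 * cardinality to the flex set {a, b}, so the offset is non-negative and the `:*=*`
 * resolves as if it had been written `:*=`.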
*/ protected[diplomacy] lazy val flexOffset: Int = { /** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex * operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a * connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of * each node in the current set and decide whether they should be added to the set or not. * * @return * the mapping of [[BaseNode]] indexed by their serial numbers. */ def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = { if (visited.contains(v.serial) || !v.flexibleArityDirection) { visited } else { v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum)) } } /** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node. * * @example * {{{ * a :*=* b :*=* c * d :*=* b * e :*=* f * }}} * * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)` */ val flexSet = DFS(this, Map()).values /** The total number of :*= operators where we're on the left. */ val allSink = flexSet.map(_.sinkCard).sum /** The total number of :=* operators used when we're on the right. */ val allSource = flexSet.map(_.sourceCard).sum require( allSink == 0 || allSource == 0, s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction." ) allSink - allSource } /** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */ protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = { if (flexibleArityDirection) flexOffset else if (n.flexibleArityDirection) n.flexOffset else 0 } /** For a node which is connected between two nodes, select the one that will influence the direction of the flex * resolution. */ protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = { val dir = edgeArityDirection(n) if (dir < 0) l else if (dir > 0) r else 1 } /** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */ private var starCycleGuard = false /** Resolve all the star operators into concrete indicies. As connections are being made, some may be "star" * connections which need to be resolved. In some way to determine how many actual edges they correspond to. We also * need to build up the ranges of edges which correspond to each binding operator, so that We can apply the correct * edge parameters and later build up correct bundle connections. * * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding * operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort * (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= * bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N` */ protected[diplomacy] lazy val ( oPortMapping: Seq[(Int, Int)], iPortMapping: Seq[(Int, Int)], oStar: Int, iStar: Int ) = { try { if (starCycleGuard) throw StarCycleException() starCycleGuard = true // For a given node N... 
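// The tallies below count, per direction, how many bindings already have a known
// number of edges (BIND_ONCE, or resolved through the neighbour's star/query) and how
// many are stars whose size is still unknown; resolveStar() then apportions the
// remaining edges to those stars, and the running sums oSum/iSum convert the
// per-binding counts into (start, end) index ranges for oPortMapping/iPortMapping.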
// Number of foo :=* N // + Number of bar :=* foo :*=* N val oStars = oBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0) } // Number of N :*= foo // + Number of N :*=* foo :*= bar val iStars = iBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0) } // 1 for foo := N // + bar.iStar for bar :*= foo :*=* N // + foo.iStar for foo :*= N // + 0 for foo :=* N val oKnown = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, 0, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => 0 } }.sum // 1 for N := foo // + bar.oStar for N :*=* foo :=* bar // + foo.oStar for N :=* foo // + 0 for N :*= foo val iKnown = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, 0) case BIND_QUERY => n.oStar case BIND_STAR => 0 } }.sum // Resolve star depends on the node subclass to implement the algorithm for this. val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars) // Cumulative list of resolved outward binding range starting points val oSum = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => oStar } }.scanLeft(0)(_ + _) // Cumulative list of resolved inward binding range starting points val iSum = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar) case BIND_QUERY => n.oStar case BIND_STAR => iStar } }.scanLeft(0)(_ + _) // Create ranges for each binding based on the running sums and return // those along with resolved values for the star operations. (oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar) } catch { case c: StarCycleException => throw c.copy(loop = context +: c.loop) } } /** Sequence of inward ports. * * This should be called after all star bindings are resolved. * * Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. * `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this * connection was made in the source code. */ protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oBindings.flatMap { case (i, n, _, p, s) => // for each binding operator in this node, look at what it connects to val (start, end) = n.iPortMapping(i) (start until end).map { j => (j, n, p, s) } } /** Sequence of outward ports. * * This should be called after all star bindings are resolved. * * `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of * outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection * was made in the source code. */ protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iBindings.flatMap { case (i, n, _, p, s) => // query this port index range of this node in the other side of node. 
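// Each inward binding operator may therefore expand into several ports: one
// (index, node, params, sourceInfo) tuple per edge in the neighbour's resolved range.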
val (start, end) = n.oPortMapping(i) (start until end).map { j => (j, n, p, s) } } // Ephemeral nodes ( which have non-None iForward/oForward) have in_degree = out_degree // Thus, there must exist an Eulerian path and the below algorithms terminate @scala.annotation.tailrec private def oTrace( tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) ): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.iForward(i) match { case None => (i, n, p, s) case Some((j, m)) => oTrace((j, m, p, s)) } } @scala.annotation.tailrec private def iTrace( tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) ): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.oForward(i) match { case None => (i, n, p, s) case Some((j, m)) => iTrace((j, m, p, s)) } } /** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - Numeric index of this binding in the [[InwardNode]] on the other end. * - [[InwardNode]] on the other end of this binding. * - A view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace) /** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - numeric index of this binding in [[OutwardNode]] on the other end. * - [[OutwardNode]] on the other end of this binding. * - a view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace) private var oParamsCycleGuard = false protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) } protected[diplomacy] lazy val doParams: Seq[DO] = { try { if (oParamsCycleGuard) throw DownwardCycleException() oParamsCycleGuard = true val o = mapParamsD(oPorts.size, diParams) require( o.size == oPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of outward ports should equal the number of produced outward parameters. |$context |$connectedPortsInfo |Downstreamed inward parameters: [${diParams.mkString(",")}] |Produced outward parameters: [${o.mkString(",")}] |""".stripMargin ) o.map(outer.mixO(_, this)) } catch { case c: DownwardCycleException => throw c.copy(loop = context +: c.loop) } } private var iParamsCycleGuard = false protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) } protected[diplomacy] lazy val uiParams: Seq[UI] = { try { if (iParamsCycleGuard) throw UpwardCycleException() iParamsCycleGuard = true val i = mapParamsU(iPorts.size, uoParams) require( i.size == iPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of inward ports should equal the number of produced inward parameters. |$context |$connectedPortsInfo |Upstreamed outward parameters: [${uoParams.mkString(",")}] |Produced inward parameters: [${i.mkString(",")}] |""".stripMargin ) i.map(inner.mixI(_, this)) } catch { case c: UpwardCycleException => throw c.copy(loop = context +: c.loop) } } /** Outward edge parameters. 
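* Each entry is produced by `outer.edgeO`, combining this node's resolved downward parameters with the upward parameters of the connected inward node.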
*/ protected[diplomacy] lazy val edgesOut: Seq[EO] = (oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) } /** Inward edge parameters. */ protected[diplomacy] lazy val edgesIn: Seq[EI] = (iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) } /** A tuple of the input edge parameters and output edge parameters for the edges bound to this node. * * If you need to access to the edges of a foreign Node, use this method (in/out create bundles). */ lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut) /** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */ protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e => val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } /** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */ protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e => val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(serial, i), sink = HalfEdge(n.serial, j), flipped = false, name = wirePrefix + "out", dataOpt = None ) } private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(n.serial, j), sink = HalfEdge(serial, i), flipped = true, name = wirePrefix + "in", dataOpt = None ) } /** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */ protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleOut(i))) } /** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */ protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleIn(i))) } private[diplomacy] var instantiated = false /** Gather Bundle and edge parameters of outward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def out: Seq[(BO, EO)] = { require( instantiated, s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleOut.zip(edgesOut) } /** Gather Bundle and edge parameters of inward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def in: Seq[(BI, EI)] = { require( instantiated, s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleIn.zip(edgesIn) } /** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires, * instantiate monitors on all input ports if appropriate, and return all the dangles of this node. 
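*
* Once instantiation has occurred, `in`/`out` become legal to call from the implementation; an illustrative sketch (names hypothetical):
* {{{
* lazy val module = new LazyModuleImp(this) {
*   val (bundle, edge) = node.in.head
*   // drive `bundle` using the helpers carried by `edge`
* }
* }}}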
*/ protected[diplomacy] def instantiate(): Seq[Dangle] = { instantiated = true if (!circuitIdentity) { (iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) } } danglesOut ++ danglesIn } protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn /** Connects the outward part of a node with the inward part of this node. */ protected[diplomacy] def bind( h: OutwardNode[DI, UI, BI], binding: NodeBinding )( implicit p: Parameters, sourceInfo: SourceInfo ): Unit = { val x = this // x := y val y = h sourceLine(sourceInfo, " at ", "") val i = x.iPushed val o = y.oPushed y.oPush( i, x, binding match { case BIND_ONCE => BIND_ONCE case BIND_FLEX => BIND_FLEX case BIND_STAR => BIND_QUERY case BIND_QUERY => BIND_STAR } ) x.iPush(o, y, binding) } /* Metadata for printing the node graph. */ def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) => val re = inner.render(e) (n, re.copy(flipped = re.flipped != p(RenderFlipped))) } /** Metadata for printing the node graph */ def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) } } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? 
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x 
match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! 
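// Illustrative usage sketch (not part of the original source): given an edge
// and a bundle `tl`, the returned pair is the (current, next) in-flight count,
//   val (inflight, inflightNext) = edge.inFlight(tl)
// which can, for example, gate power-down logic on `inflight === 0.U`.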
def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare 
c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, 
mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := 
DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: 
Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, 
data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } } File ProbePicker.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.diplomacy.{AddressSet, IdRange} /* A ProbePicker is used to unify multiple cache banks into one logical cache */ class ProbePicker(implicit p: Parameters) extends LazyModule { val node = TLAdapterNode( clientFn = { p => // The ProbePicker assembles multiple clients based on the assumption they are contiguous in the clients list // This should be true for custers of xbar :=* BankBinder connections def combine(next: TLMasterParameters, pair: (TLMasterParameters, Seq[TLMasterParameters])) = { val (head, output) = pair if (head.visibility.exists(x => next.visibility.exists(_.overlaps(x)))) { (next, head +: output) // pair is not banked, push head without merging } else { def redact(x: TLMasterParameters) = x.v1copy(sourceId = IdRange(0,1), nodePath = Nil, visibility = Seq(AddressSet(0, ~0))) require (redact(next) == redact(head), s"${redact(next)} != ${redact(head)}") val merge = head.v1copy( sourceId = IdRange( head.sourceId.start min next.sourceId.start, head.sourceId.end max next.sourceId.end), visibility = AddressSet.unify(head.visibility ++ next.visibility)) (merge, output) } } val myNil: Seq[TLMasterParameters] = Nil val (head, output) = p.clients.init.foldRight((p.clients.last, myNil))(combine) p.v1copy(clients = head +: output) }, managerFn = { p => p }) lazy val module = new Impl class Impl extends LazyModuleImp(this) { (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out <> in // Based on address, adjust source to route to the correct bank if (edgeIn.client.clients.size != edgeOut.client.clients.size) { in.b.bits.source := Mux1H( edgeOut.client.clients.map(_.sourceId contains out.b.bits.source), edgeOut.client.clients.map { c => val banks = edgeIn.client.clients.filter(c.sourceId contains _.sourceId) if (banks.size == 1) { out.b.bits.source // allow sharing the value between single-bank cases } else { Mux1H( banks.map(_.visibility.map(_ contains out.b.bits.address).reduce(_ || _)), banks.map(_.sourceId.start.U)) } } ) } } } } object ProbePicker { def apply()(implicit p: Parameters): TLNode = { val picker = LazyModule(new ProbePicker) picker.node } } File LazyScope.scala: package 
org.chipsalliance.diplomacy.lazymodule import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.ValName /** Allows dynamic creation of [[Module]] hierarchy and "shoving" logic into a [[LazyModule]]. */ trait LazyScope { this: LazyModule => override def toString: String = s"LazyScope named $name" /** Evaluate `body` in the current [[LazyModule.scope]] */ def apply[T](body: => T): T = { // Preserve the previous value of the [[LazyModule.scope]], because when calling [[apply]] function, // [[LazyModule.scope]] will be altered. val saved = LazyModule.scope // [[LazyModule.scope]] stack push. LazyModule.scope = Some(this) // Evaluate [[body]] in the current `scope`, saving the result to [[out]]. val out = body // Check that the `scope` after evaluating `body` is the same as when we started. require(LazyModule.scope.isDefined, s"LazyScope $name tried to exit, but scope was empty!") require( LazyModule.scope.get eq this, s"LazyScope $name exited before LazyModule ${LazyModule.scope.get.name} was closed" ) // [[LazyModule.scope]] stack pop. LazyModule.scope = saved out } } /** Used to automatically create a level of module hierarchy (a [[SimpleLazyModule]]) within which [[LazyModule]]s can * be instantiated and connected. * * It will instantiate a [[SimpleLazyModule]] to manage evaluation of `body` and evaluate `body` code snippets in this * scope. */ object LazyScope { /** Create a [[LazyScope]] with an implicit instance name. * * @param body * code executed within the generated [[SimpleLazyModule]]. * @param valName * instance name of generated [[SimpleLazyModule]]. * @param p * [[Parameters]] propagated to [[SimpleLazyModule]]. */ def apply[T]( body: => T )( implicit valName: ValName, p: Parameters ): T = { apply(valName.value, "SimpleLazyModule", None)(body)(p) } /** Create a [[LazyScope]] with an explicitly defined instance name. * * @param name * instance name of generated [[SimpleLazyModule]]. * @param body * code executed within the generated `SimpleLazyModule` * @param p * [[Parameters]] propagated to [[SimpleLazyModule]]. */ def apply[T]( name: String )(body: => T )( implicit p: Parameters ): T = { apply(name, "SimpleLazyModule", None)(body)(p) } /** Create a [[LazyScope]] with an explicit instance and class name, and control inlining. * * @param name * instance name of generated [[SimpleLazyModule]]. * @param desiredModuleName * class name of generated [[SimpleLazyModule]]. * @param overrideInlining * tell FIRRTL that this [[SimpleLazyModule]]'s module should be inlined. * @param body * code executed within the generated `SimpleLazyModule` * @param p * [[Parameters]] propagated to [[SimpleLazyModule]]. */ def apply[T]( name: String, desiredModuleName: String, overrideInlining: Option[Boolean] = None )(body: => T )( implicit p: Parameters ): T = { val scope = LazyModule(new SimpleLazyModule with LazyScope { override lazy val desiredName = desiredModuleName override def shouldBeInlined = overrideInlining.getOrElse(super.shouldBeInlined) }).suggestName(name) scope { body } } /** Create a [[LazyScope]] to temporarily group children for some reason, but tell Firrtl to inline it. * * For example, we might want to control a set of children's clocks but then not keep the parent wrapper. * * @param body * code executed within the generated `SimpleLazyModule` * @param p * [[Parameters]] propagated to [[SimpleLazyModule]]. 
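* @example
*   An illustrative sketch (the `node` and `upstream` names are hypothetical):
*   {{{
*   LazyScope.inline {
*     node := TLBuffer() := upstream
*   }
*   }}}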
*/ def inline[T]( body: => T )( implicit p: Parameters ): T = { apply("noname", "ShouldBeInlined", Some(false))(body)(p) } } File Xbar.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.diplomacy.{AddressDecoder, AddressSet, RegionType, IdRange, TriStateValue} import freechips.rocketchip.util.BundleField // Trades off slave port proximity against routing resource cost object ForceFanout { def apply[T]( a: TriStateValue = TriStateValue.unset, b: TriStateValue = TriStateValue.unset, c: TriStateValue = TriStateValue.unset, d: TriStateValue = TriStateValue.unset, e: TriStateValue = TriStateValue.unset)(body: Parameters => T)(implicit p: Parameters) = { body(p.alterPartial { case ForceFanoutKey => p(ForceFanoutKey) match { case ForceFanoutParams(pa, pb, pc, pd, pe) => ForceFanoutParams(a.update(pa), b.update(pb), c.update(pc), d.update(pd), e.update(pe)) } }) } } private case class ForceFanoutParams(a: Boolean, b: Boolean, c: Boolean, d: Boolean, e: Boolean) private case object ForceFanoutKey extends Field(ForceFanoutParams(false, false, false, false, false)) class TLXbar(policy: TLArbiter.Policy = TLArbiter.roundRobin, nameSuffix: Option[String] = None)(implicit p: Parameters) extends LazyModule { val node = new TLNexusNode( clientFn = { seq => seq(0).v1copy( echoFields = BundleField.union(seq.flatMap(_.echoFields)), requestFields = BundleField.union(seq.flatMap(_.requestFields)), responseKeys = seq.flatMap(_.responseKeys).distinct, minLatency = seq.map(_.minLatency).min, clients = (TLXbar.mapInputIds(seq) zip seq) flatMap { case (range, port) => port.clients map { client => client.v1copy( sourceId = client.sourceId.shift(range.start) )} } ) }, managerFn = { seq => val fifoIdFactory = TLXbar.relabeler() seq(0).v1copy( responseFields = BundleField.union(seq.flatMap(_.responseFields)), requestKeys = seq.flatMap(_.requestKeys).distinct, minLatency = seq.map(_.minLatency).min, endSinkId = TLXbar.mapOutputIds(seq).map(_.end).max, managers = seq.flatMap { port => require (port.beatBytes == seq(0).beatBytes, s"Xbar ($name with parent $parent) data widths don't match: ${port.managers.map(_.name)} has ${port.beatBytes}B vs ${seq(0).managers.map(_.name)} has ${seq(0).beatBytes}B") val fifoIdMapper = fifoIdFactory() port.managers map { manager => manager.v1copy( fifoId = manager.fifoId.map(fifoIdMapper(_)) )} } ) } ){ override def circuitIdentity = outputs.size == 1 && inputs.size == 1 } lazy val module = new Impl class Impl extends LazyModuleImp(this) { if ((node.in.size * node.out.size) > (8*32)) { println (s"!!! WARNING !!!") println (s" Your TLXbar ($name with parent $parent) is very large, with ${node.in.size} Masters and ${node.out.size} Slaves.") println (s"!!! 
WARNING !!!") } val wide_bundle = TLBundleParameters.union((node.in ++ node.out).map(_._2.bundle)) override def desiredName = (Seq("TLXbar") ++ nameSuffix ++ Seq(s"i${node.in.size}_o${node.out.size}_${wide_bundle.shortName}")).mkString("_") TLXbar.circuit(policy, node.in, node.out) } } object TLXbar { def mapInputIds(ports: Seq[TLMasterPortParameters]) = assignRanges(ports.map(_.endSourceId)) def mapOutputIds(ports: Seq[TLSlavePortParameters]) = assignRanges(ports.map(_.endSinkId)) def assignRanges(sizes: Seq[Int]) = { val pow2Sizes = sizes.map { z => if (z == 0) 0 else 1 << log2Ceil(z) } val tuples = pow2Sizes.zipWithIndex.sortBy(_._1) // record old index, then sort by increasing size val starts = tuples.scanRight(0)(_._1 + _).tail // suffix-sum of the sizes = the start positions val ranges = (tuples zip starts) map { case ((sz, i), st) => (if (sz == 0) IdRange(0, 0) else IdRange(st, st + sz), i) } ranges.sortBy(_._2).map(_._1) // Restore orignal order } def relabeler() = { var idFactory = 0 () => { val fifoMap = scala.collection.mutable.HashMap.empty[Int, Int] (x: Int) => { if (fifoMap.contains(x)) fifoMap(x) else { val out = idFactory idFactory = idFactory + 1 fifoMap += (x -> out) out } } } } def circuit(policy: TLArbiter.Policy, seqIn: Seq[(TLBundle, TLEdge)], seqOut: Seq[(TLBundle, TLEdge)]) { val (io_in, edgesIn) = seqIn.unzip val (io_out, edgesOut) = seqOut.unzip // Not every master need connect to every slave on every channel; determine which connections are necessary val reachableIO = edgesIn.map { cp => edgesOut.map { mp => cp.client.clients.exists { c => mp.manager.managers.exists { m => c.visibility.exists { ca => m.address.exists { ma => ca.overlaps(ma)}}}} }.toVector}.toVector val probeIO = (edgesIn zip reachableIO).map { case (cp, reachableO) => (edgesOut zip reachableO).map { case (mp, reachable) => reachable && cp.client.anySupportProbe && mp.manager.managers.exists(_.regionType >= RegionType.TRACKED) }.toVector}.toVector val releaseIO = (edgesIn zip reachableIO).map { case (cp, reachableO) => (edgesOut zip reachableO).map { case (mp, reachable) => reachable && cp.client.anySupportProbe && mp.manager.anySupportAcquireB }.toVector}.toVector val connectAIO = reachableIO val connectBIO = probeIO val connectCIO = releaseIO val connectDIO = reachableIO val connectEIO = releaseIO def transpose[T](x: Seq[Seq[T]]) = if (x.isEmpty) Nil else Vector.tabulate(x(0).size) { i => Vector.tabulate(x.size) { j => x(j)(i) } } val connectAOI = transpose(connectAIO) val connectBOI = transpose(connectBIO) val connectCOI = transpose(connectCIO) val connectDOI = transpose(connectDIO) val connectEOI = transpose(connectEIO) // Grab the port ID mapping val inputIdRanges = TLXbar.mapInputIds(edgesIn.map(_.client)) val outputIdRanges = TLXbar.mapOutputIds(edgesOut.map(_.manager)) // We need an intermediate size of bundle with the widest possible identifiers val wide_bundle = TLBundleParameters.union(io_in.map(_.params) ++ io_out.map(_.params)) // Handle size = 1 gracefully (Chisel3 empty range is broken) def trim(id: UInt, size: Int): UInt = if (size <= 1) 0.U else id(log2Ceil(size)-1, 0) // Transform input bundle sources (sinks use global namespace on both sides) val in = Wire(Vec(io_in.size, TLBundle(wide_bundle))) for (i <- 0 until in.size) { val r = inputIdRanges(i) if (connectAIO(i).exists(x=>x)) { in(i).a.bits.user := DontCare in(i).a.squeezeAll.waiveAll :<>= io_in(i).a.squeezeAll.waiveAll in(i).a.bits.source := io_in(i).a.bits.source | r.start.U } else { in(i).a := DontCare io_in(i).a := 
DontCare in(i).a.valid := false.B io_in(i).a.ready := true.B } if (connectBIO(i).exists(x=>x)) { io_in(i).b.squeezeAll :<>= in(i).b.squeezeAll io_in(i).b.bits.source := trim(in(i).b.bits.source, r.size) } else { in(i).b := DontCare io_in(i).b := DontCare in(i).b.ready := true.B io_in(i).b.valid := false.B } if (connectCIO(i).exists(x=>x)) { in(i).c.bits.user := DontCare in(i).c.squeezeAll.waiveAll :<>= io_in(i).c.squeezeAll.waiveAll in(i).c.bits.source := io_in(i).c.bits.source | r.start.U } else { in(i).c := DontCare io_in(i).c := DontCare in(i).c.valid := false.B io_in(i).c.ready := true.B } if (connectDIO(i).exists(x=>x)) { io_in(i).d.squeezeAll.waiveAll :<>= in(i).d.squeezeAll.waiveAll io_in(i).d.bits.source := trim(in(i).d.bits.source, r.size) } else { in(i).d := DontCare io_in(i).d := DontCare in(i).d.ready := true.B io_in(i).d.valid := false.B } if (connectEIO(i).exists(x=>x)) { in(i).e.squeezeAll :<>= io_in(i).e.squeezeAll } else { in(i).e := DontCare io_in(i).e := DontCare in(i).e.valid := false.B io_in(i).e.ready := true.B } } // Transform output bundle sinks (sources use global namespace on both sides) val out = Wire(Vec(io_out.size, TLBundle(wide_bundle))) for (o <- 0 until out.size) { val r = outputIdRanges(o) if (connectAOI(o).exists(x=>x)) { out(o).a.bits.user := DontCare io_out(o).a.squeezeAll.waiveAll :<>= out(o).a.squeezeAll.waiveAll } else { out(o).a := DontCare io_out(o).a := DontCare out(o).a.ready := true.B io_out(o).a.valid := false.B } if (connectBOI(o).exists(x=>x)) { out(o).b.squeezeAll :<>= io_out(o).b.squeezeAll } else { out(o).b := DontCare io_out(o).b := DontCare out(o).b.valid := false.B io_out(o).b.ready := true.B } if (connectCOI(o).exists(x=>x)) { out(o).c.bits.user := DontCare io_out(o).c.squeezeAll.waiveAll :<>= out(o).c.squeezeAll.waiveAll } else { out(o).c := DontCare io_out(o).c := DontCare out(o).c.ready := true.B io_out(o).c.valid := false.B } if (connectDOI(o).exists(x=>x)) { out(o).d.squeezeAll :<>= io_out(o).d.squeezeAll out(o).d.bits.sink := io_out(o).d.bits.sink | r.start.U } else { out(o).d := DontCare io_out(o).d := DontCare out(o).d.valid := false.B io_out(o).d.ready := true.B } if (connectEOI(o).exists(x=>x)) { io_out(o).e.squeezeAll :<>= out(o).e.squeezeAll io_out(o).e.bits.sink := trim(out(o).e.bits.sink, r.size) } else { out(o).e := DontCare io_out(o).e := DontCare out(o).e.ready := true.B io_out(o).e.valid := false.B } } // Filter a list to only those elements selected def filter[T](data: Seq[T], mask: Seq[Boolean]) = (data zip mask).filter(_._2).map(_._1) // Based on input=>output connectivity, create per-input minimal address decode circuits val requiredAC = (connectAIO ++ connectCIO).distinct val outputPortFns: Map[Vector[Boolean], Seq[UInt => Bool]] = requiredAC.map { connectO => val port_addrs = edgesOut.map(_.manager.managers.flatMap(_.address)) val routingMask = AddressDecoder(filter(port_addrs, connectO)) val route_addrs = port_addrs.map(seq => AddressSet.unify(seq.map(_.widen(~routingMask)).distinct)) // Print the address mapping if (false) { println("Xbar mapping:") route_addrs.foreach { p => print(" ") p.foreach { a => print(s" ${a}") } println("") } println("--") } (connectO, route_addrs.map(seq => (addr: UInt) => seq.map(_.contains(addr)).reduce(_ || _))) }.toMap // Print the ID mapping if (false) { println(s"XBar mapping:") (edgesIn zip inputIdRanges).zipWithIndex.foreach { case ((edge, id), i) => println(s"\t$i assigned ${id} for ${edge.client.clients.map(_.name).mkString(", ")}") } println("") } val addressA = (in zip 
edgesIn) map { case (i, e) => e.address(i.a.bits) } val addressC = (in zip edgesIn) map { case (i, e) => e.address(i.c.bits) } def unique(x: Vector[Boolean]): Bool = (x.filter(x=>x).size <= 1).B val requestAIO = (connectAIO zip addressA) map { case (c, i) => outputPortFns(c).map { o => unique(c) || o(i) } } val requestCIO = (connectCIO zip addressC) map { case (c, i) => outputPortFns(c).map { o => unique(c) || o(i) } } val requestBOI = out.map { o => inputIdRanges.map { i => i.contains(o.b.bits.source) } } val requestDOI = out.map { o => inputIdRanges.map { i => i.contains(o.d.bits.source) } } val requestEIO = in.map { i => outputIdRanges.map { o => o.contains(i.e.bits.sink) } } val beatsAI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.a.bits) } val beatsBO = (out zip edgesOut) map { case (o, e) => e.numBeats1(o.b.bits) } val beatsCI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.c.bits) } val beatsDO = (out zip edgesOut) map { case (o, e) => e.numBeats1(o.d.bits) } val beatsEI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.e.bits) } // Fanout the input sources to the output sinks val portsAOI = transpose((in zip requestAIO) map { case (i, r) => TLXbar.fanout(i.a, r, edgesOut.map(_.params(ForceFanoutKey).a)) }) val portsBIO = transpose((out zip requestBOI) map { case (o, r) => TLXbar.fanout(o.b, r, edgesIn .map(_.params(ForceFanoutKey).b)) }) val portsCOI = transpose((in zip requestCIO) map { case (i, r) => TLXbar.fanout(i.c, r, edgesOut.map(_.params(ForceFanoutKey).c)) }) val portsDIO = transpose((out zip requestDOI) map { case (o, r) => TLXbar.fanout(o.d, r, edgesIn .map(_.params(ForceFanoutKey).d)) }) val portsEOI = transpose((in zip requestEIO) map { case (i, r) => TLXbar.fanout(i.e, r, edgesOut.map(_.params(ForceFanoutKey).e)) }) // Arbitrate amongst the sources for (o <- 0 until out.size) { TLArbiter(policy)(out(o).a, filter(beatsAI zip portsAOI(o), connectAOI(o)):_*) TLArbiter(policy)(out(o).c, filter(beatsCI zip portsCOI(o), connectCOI(o)):_*) TLArbiter(policy)(out(o).e, filter(beatsEI zip portsEOI(o), connectEOI(o)):_*) filter(portsAOI(o), connectAOI(o).map(!_)) foreach { r => r.ready := false.B } filter(portsCOI(o), connectCOI(o).map(!_)) foreach { r => r.ready := false.B } filter(portsEOI(o), connectEOI(o).map(!_)) foreach { r => r.ready := false.B } } for (i <- 0 until in.size) { TLArbiter(policy)(in(i).b, filter(beatsBO zip portsBIO(i), connectBIO(i)):_*) TLArbiter(policy)(in(i).d, filter(beatsDO zip portsDIO(i), connectDIO(i)):_*) filter(portsBIO(i), connectBIO(i).map(!_)) foreach { r => r.ready := false.B } filter(portsDIO(i), connectDIO(i).map(!_)) foreach { r => r.ready := false.B } } } def apply(policy: TLArbiter.Policy = TLArbiter.roundRobin, nameSuffix: Option[String] = None)(implicit p: Parameters): TLNode = { val xbar = LazyModule(new TLXbar(policy, nameSuffix)) xbar.node } // Replicate an input port to each output port def fanout[T <: TLChannel](input: DecoupledIO[T], select: Seq[Bool], force: Seq[Boolean] = Nil): Seq[DecoupledIO[T]] = { val filtered = Wire(Vec(select.size, chiselTypeOf(input))) for (i <- 0 until select.size) { filtered(i).bits := (if (force.lift(i).getOrElse(false)) IdentityModule(input.bits) else input.bits) filtered(i).valid := input.valid && (select(i) || (select.size == 1).B) } input.ready := Mux1H(select, filtered.map(_.ready)) filtered } } // Synthesizable unit tests import freechips.rocketchip.unittest._ class TLRAMXbar(nManagers: Int, txns: Int)(implicit p: Parameters) extends LazyModule { val fuzz = LazyModule(new 
TLFuzzer(txns)) val model = LazyModule(new TLRAMModel("Xbar")) val xbar = LazyModule(new TLXbar) xbar.node := TLDelayer(0.1) := model.node := fuzz.node (0 until nManagers) foreach { n => val ram = LazyModule(new TLRAM(AddressSet(0x0+0x400*n, 0x3ff))) ram.node := TLFragmenter(4, 256) := TLDelayer(0.1) := xbar.node } lazy val module = new Impl class Impl extends LazyModuleImp(this) with UnitTestModule { io.finished := fuzz.module.io.finished } } class TLRAMXbarTest(nManagers: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) { val dut = Module(LazyModule(new TLRAMXbar(nManagers,txns)).module) dut.io.start := io.start io.finished := dut.io.finished } class TLMulticlientXbar(nManagers: Int, nClients: Int, txns: Int)(implicit p: Parameters) extends LazyModule { val xbar = LazyModule(new TLXbar) val fuzzers = (0 until nClients) map { n => val fuzz = LazyModule(new TLFuzzer(txns)) xbar.node := TLDelayer(0.1) := fuzz.node fuzz } (0 until nManagers) foreach { n => val ram = LazyModule(new TLRAM(AddressSet(0x0+0x400*n, 0x3ff))) ram.node := TLFragmenter(4, 256) := TLDelayer(0.1) := xbar.node } lazy val module = new Impl class Impl extends LazyModuleImp(this) with UnitTestModule { io.finished := fuzzers.last.module.io.finished } } class TLMulticlientXbarTest(nManagers: Int, nClients: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) { val dut = Module(LazyModule(new TLMulticlientXbar(nManagers, nClients, txns)).module) dut.io.start := io.start io.finished := dut.io.finished }
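// Hedged illustration (not part of Xbar.scala): how TLXbar.assignRanges packs
// source-ID ranges. Each requested size is rounded up to a power of two, the
// ports are sorted by that size, and a suffix sum lays the blocks out so the
// largest client starts at ID 0; the results are then returned in original
// port order. For hypothetical endSourceId values:
//   TLXbar.assignRanges(Seq(3, 0, 5))
//   // pow2Sizes       = Seq(4, 0, 8)
//   // assigned ranges = Seq(IdRange(8, 12), IdRange(0, 0), IdRange(0, 8))
// i.e. the 5-source port is given IDs [0, 8), the 3-source port [8, 12), and
// the empty port the empty range.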
module MemoryBus( // @[ClockDomain.scala:14:9] input auto_buffer_out_a_ready, // @[LazyModuleImp.scala:107:25] output auto_buffer_out_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_buffer_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_buffer_out_a_bits_param, // @[LazyModuleImp.scala:107:25] output [2:0] auto_buffer_out_a_bits_size, // @[LazyModuleImp.scala:107:25] output [5:0] auto_buffer_out_a_bits_source, // @[LazyModuleImp.scala:107:25] output [27:0] auto_buffer_out_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_buffer_out_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_buffer_out_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_buffer_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_buffer_out_d_ready, // @[LazyModuleImp.scala:107:25] input auto_buffer_out_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_buffer_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [1:0] auto_buffer_out_d_bits_param, // @[LazyModuleImp.scala:107:25] input [2:0] auto_buffer_out_d_bits_size, // @[LazyModuleImp.scala:107:25] input [5:0] auto_buffer_out_d_bits_source, // @[LazyModuleImp.scala:107:25] input auto_buffer_out_d_bits_sink, // @[LazyModuleImp.scala:107:25] input auto_buffer_out_d_bits_denied, // @[LazyModuleImp.scala:107:25] input [63:0] auto_buffer_out_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_buffer_out_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_ready, // @[LazyModuleImp.scala:107:25] output auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_valid, // @[LazyModuleImp.scala:107:25] output [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_id, // @[LazyModuleImp.scala:107:25] output [31:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_addr, // @[LazyModuleImp.scala:107:25] output [7:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_len, // @[LazyModuleImp.scala:107:25] output [2:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_size, // @[LazyModuleImp.scala:107:25] output [1:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_burst, // @[LazyModuleImp.scala:107:25] output auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_lock, // @[LazyModuleImp.scala:107:25] output [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_cache, // @[LazyModuleImp.scala:107:25] output [2:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_prot, // @[LazyModuleImp.scala:107:25] output [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_qos, // @[LazyModuleImp.scala:107:25] input auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_ready, // @[LazyModuleImp.scala:107:25] output auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_valid, // @[LazyModuleImp.scala:107:25] output [63:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_bits_data, // @[LazyModuleImp.scala:107:25] output [7:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_bits_strb, // @[LazyModuleImp.scala:107:25] output auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_bits_last, // @[LazyModuleImp.scala:107:25] output auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_ready, // @[LazyModuleImp.scala:107:25] input 
auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_valid, // @[LazyModuleImp.scala:107:25] input [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_bits_id, // @[LazyModuleImp.scala:107:25] input [1:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_bits_resp, // @[LazyModuleImp.scala:107:25] input auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_ready, // @[LazyModuleImp.scala:107:25] output auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_valid, // @[LazyModuleImp.scala:107:25] output [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_id, // @[LazyModuleImp.scala:107:25] output [31:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_addr, // @[LazyModuleImp.scala:107:25] output [7:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_len, // @[LazyModuleImp.scala:107:25] output [2:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_size, // @[LazyModuleImp.scala:107:25] output [1:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_burst, // @[LazyModuleImp.scala:107:25] output auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_lock, // @[LazyModuleImp.scala:107:25] output [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_cache, // @[LazyModuleImp.scala:107:25] output [2:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_prot, // @[LazyModuleImp.scala:107:25] output [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_qos, // @[LazyModuleImp.scala:107:25] output auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_ready, // @[LazyModuleImp.scala:107:25] input auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_valid, // @[LazyModuleImp.scala:107:25] input [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_id, // @[LazyModuleImp.scala:107:25] input [63:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_data, // @[LazyModuleImp.scala:107:25] input [1:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_resp, // @[LazyModuleImp.scala:107:25] input auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_last, // @[LazyModuleImp.scala:107:25] output auto_fixedClockNode_anon_out_1_clock, // @[LazyModuleImp.scala:107:25] output auto_fixedClockNode_anon_out_1_reset, // @[LazyModuleImp.scala:107:25] output auto_fixedClockNode_anon_out_0_clock, // @[LazyModuleImp.scala:107:25] output auto_fixedClockNode_anon_out_0_reset, // @[LazyModuleImp.scala:107:25] input auto_mbus_clock_groups_in_member_mbus_0_clock, // @[LazyModuleImp.scala:107:25] input auto_mbus_clock_groups_in_member_mbus_0_reset, // @[LazyModuleImp.scala:107:25] output auto_bus_xing_in_a_ready, // @[LazyModuleImp.scala:107:25] input auto_bus_xing_in_a_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_bus_xing_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_bus_xing_in_a_bits_param, // @[LazyModuleImp.scala:107:25] input [2:0] auto_bus_xing_in_a_bits_size, // @[LazyModuleImp.scala:107:25] input [5:0] auto_bus_xing_in_a_bits_source, // @[LazyModuleImp.scala:107:25] input [31:0] auto_bus_xing_in_a_bits_address, // @[LazyModuleImp.scala:107:25] input [7:0] auto_bus_xing_in_a_bits_mask, // @[LazyModuleImp.scala:107:25] input [63:0] auto_bus_xing_in_a_bits_data, // @[LazyModuleImp.scala:107:25] input 
auto_bus_xing_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_bus_xing_in_d_ready, // @[LazyModuleImp.scala:107:25] output auto_bus_xing_in_d_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_bus_xing_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25] output [1:0] auto_bus_xing_in_d_bits_param, // @[LazyModuleImp.scala:107:25] output [2:0] auto_bus_xing_in_d_bits_size, // @[LazyModuleImp.scala:107:25] output [5:0] auto_bus_xing_in_d_bits_source, // @[LazyModuleImp.scala:107:25] output auto_bus_xing_in_d_bits_sink, // @[LazyModuleImp.scala:107:25] output auto_bus_xing_in_d_bits_denied, // @[LazyModuleImp.scala:107:25] output [63:0] auto_bus_xing_in_d_bits_data, // @[LazyModuleImp.scala:107:25] output auto_bus_xing_in_d_bits_corrupt // @[LazyModuleImp.scala:107:25] ); wire coupler_to_mbusscratchpad00_auto_tl_out_d_valid; // @[LazyModuleImp.scala:138:7] wire coupler_to_mbusscratchpad00_auto_tl_out_d_bits_corrupt; // @[LazyModuleImp.scala:138:7] wire [63:0] coupler_to_mbusscratchpad00_auto_tl_out_d_bits_data; // @[LazyModuleImp.scala:138:7] wire coupler_to_mbusscratchpad00_auto_tl_out_d_bits_denied; // @[LazyModuleImp.scala:138:7] wire coupler_to_mbusscratchpad00_auto_tl_out_d_bits_sink; // @[LazyModuleImp.scala:138:7] wire [5:0] coupler_to_mbusscratchpad00_auto_tl_out_d_bits_source; // @[LazyModuleImp.scala:138:7] wire [2:0] coupler_to_mbusscratchpad00_auto_tl_out_d_bits_size; // @[LazyModuleImp.scala:138:7] wire [1:0] coupler_to_mbusscratchpad00_auto_tl_out_d_bits_param; // @[LazyModuleImp.scala:138:7] wire [2:0] coupler_to_mbusscratchpad00_auto_tl_out_d_bits_opcode; // @[LazyModuleImp.scala:138:7] wire coupler_to_mbusscratchpad00_auto_tl_out_a_ready; // @[LazyModuleImp.scala:138:7] wire coupler_to_mbusscratchpad00_auto_tl_in_d_ready; // @[LazyModuleImp.scala:138:7] wire coupler_to_mbusscratchpad00_auto_tl_in_a_valid; // @[LazyModuleImp.scala:138:7] wire coupler_to_mbusscratchpad00_auto_tl_in_a_bits_corrupt; // @[LazyModuleImp.scala:138:7] wire [63:0] coupler_to_mbusscratchpad00_auto_tl_in_a_bits_data; // @[LazyModuleImp.scala:138:7] wire [7:0] coupler_to_mbusscratchpad00_auto_tl_in_a_bits_mask; // @[LazyModuleImp.scala:138:7] wire [27:0] coupler_to_mbusscratchpad00_auto_tl_in_a_bits_address; // @[LazyModuleImp.scala:138:7] wire [5:0] coupler_to_mbusscratchpad00_auto_tl_in_a_bits_source; // @[LazyModuleImp.scala:138:7] wire [2:0] coupler_to_mbusscratchpad00_auto_tl_in_a_bits_size; // @[LazyModuleImp.scala:138:7] wire [2:0] coupler_to_mbusscratchpad00_auto_tl_in_a_bits_param; // @[LazyModuleImp.scala:138:7] wire [2:0] coupler_to_mbusscratchpad00_auto_tl_in_a_bits_opcode; // @[LazyModuleImp.scala:138:7] wire [5:0] xbar_in_0_d_bits_source; // @[Xbar.scala:159:18] wire [5:0] xbar_in_0_a_bits_source; // @[Xbar.scala:159:18] wire xbar_auto_anon_out_d_valid; // @[Xbar.scala:74:9] wire xbar_auto_anon_out_d_bits_corrupt; // @[Xbar.scala:74:9] wire [63:0] xbar_auto_anon_out_d_bits_data; // @[Xbar.scala:74:9] wire xbar_auto_anon_out_d_bits_denied; // @[Xbar.scala:74:9] wire [5:0] xbar_auto_anon_out_d_bits_source; // @[Xbar.scala:74:9] wire [2:0] xbar_auto_anon_out_d_bits_size; // @[Xbar.scala:74:9] wire [2:0] xbar_auto_anon_out_d_bits_opcode; // @[Xbar.scala:74:9] wire xbar_auto_anon_out_a_ready; // @[Xbar.scala:74:9] wire xbar_auto_anon_in_d_ready; // @[Xbar.scala:74:9] wire xbar_auto_anon_in_a_valid; // @[Xbar.scala:74:9] wire xbar_auto_anon_in_a_bits_corrupt; // @[Xbar.scala:74:9] wire [63:0] xbar_auto_anon_in_a_bits_data; // @[Xbar.scala:74:9] wire [7:0] 
xbar_auto_anon_in_a_bits_mask; // @[Xbar.scala:74:9] wire [31:0] xbar_auto_anon_in_a_bits_address; // @[Xbar.scala:74:9] wire [5:0] xbar_auto_anon_in_a_bits_source; // @[Xbar.scala:74:9] wire [2:0] xbar_auto_anon_in_a_bits_size; // @[Xbar.scala:74:9] wire [2:0] xbar_auto_anon_in_a_bits_param; // @[Xbar.scala:74:9] wire [2:0] xbar_auto_anon_in_a_bits_opcode; // @[Xbar.scala:74:9] wire buffer_auto_in_d_valid; // @[Buffer.scala:40:9] wire buffer_auto_in_d_ready; // @[Buffer.scala:40:9] wire buffer_auto_in_d_bits_corrupt; // @[Buffer.scala:40:9] wire [63:0] buffer_auto_in_d_bits_data; // @[Buffer.scala:40:9] wire buffer_auto_in_d_bits_denied; // @[Buffer.scala:40:9] wire buffer_auto_in_d_bits_sink; // @[Buffer.scala:40:9] wire [5:0] buffer_auto_in_d_bits_source; // @[Buffer.scala:40:9] wire [2:0] buffer_auto_in_d_bits_size; // @[Buffer.scala:40:9] wire [1:0] buffer_auto_in_d_bits_param; // @[Buffer.scala:40:9] wire [2:0] buffer_auto_in_d_bits_opcode; // @[Buffer.scala:40:9] wire buffer_auto_in_a_valid; // @[Buffer.scala:40:9] wire buffer_auto_in_a_ready; // @[Buffer.scala:40:9] wire buffer_auto_in_a_bits_corrupt; // @[Buffer.scala:40:9] wire [63:0] buffer_auto_in_a_bits_data; // @[Buffer.scala:40:9] wire [7:0] buffer_auto_in_a_bits_mask; // @[Buffer.scala:40:9] wire [31:0] buffer_auto_in_a_bits_address; // @[Buffer.scala:40:9] wire [5:0] buffer_auto_in_a_bits_source; // @[Buffer.scala:40:9] wire [2:0] buffer_auto_in_a_bits_size; // @[Buffer.scala:40:9] wire [2:0] buffer_auto_in_a_bits_param; // @[Buffer.scala:40:9] wire [2:0] buffer_auto_in_a_bits_opcode; // @[Buffer.scala:40:9] wire fixer_auto_anon_out_d_valid; // @[FIFOFixer.scala:50:9] wire fixer_auto_anon_out_d_bits_corrupt; // @[FIFOFixer.scala:50:9] wire [63:0] fixer_auto_anon_out_d_bits_data; // @[FIFOFixer.scala:50:9] wire fixer_auto_anon_out_d_bits_denied; // @[FIFOFixer.scala:50:9] wire fixer_auto_anon_out_d_bits_sink; // @[FIFOFixer.scala:50:9] wire [5:0] fixer_auto_anon_out_d_bits_source; // @[FIFOFixer.scala:50:9] wire [2:0] fixer_auto_anon_out_d_bits_size; // @[FIFOFixer.scala:50:9] wire [1:0] fixer_auto_anon_out_d_bits_param; // @[FIFOFixer.scala:50:9] wire [2:0] fixer_auto_anon_out_d_bits_opcode; // @[FIFOFixer.scala:50:9] wire fixer_auto_anon_out_a_ready; // @[FIFOFixer.scala:50:9] wire fixer_auto_anon_in_d_valid; // @[FIFOFixer.scala:50:9] wire fixer_auto_anon_in_d_ready; // @[FIFOFixer.scala:50:9] wire fixer_auto_anon_in_d_bits_corrupt; // @[FIFOFixer.scala:50:9] wire [63:0] fixer_auto_anon_in_d_bits_data; // @[FIFOFixer.scala:50:9] wire fixer_auto_anon_in_d_bits_denied; // @[FIFOFixer.scala:50:9] wire fixer_auto_anon_in_d_bits_sink; // @[FIFOFixer.scala:50:9] wire [5:0] fixer_auto_anon_in_d_bits_source; // @[FIFOFixer.scala:50:9] wire [2:0] fixer_auto_anon_in_d_bits_size; // @[FIFOFixer.scala:50:9] wire [1:0] fixer_auto_anon_in_d_bits_param; // @[FIFOFixer.scala:50:9] wire [2:0] fixer_auto_anon_in_d_bits_opcode; // @[FIFOFixer.scala:50:9] wire fixer_auto_anon_in_a_valid; // @[FIFOFixer.scala:50:9] wire fixer_auto_anon_in_a_ready; // @[FIFOFixer.scala:50:9] wire fixer_auto_anon_in_a_bits_corrupt; // @[FIFOFixer.scala:50:9] wire [63:0] fixer_auto_anon_in_a_bits_data; // @[FIFOFixer.scala:50:9] wire [7:0] fixer_auto_anon_in_a_bits_mask; // @[FIFOFixer.scala:50:9] wire [31:0] fixer_auto_anon_in_a_bits_address; // @[FIFOFixer.scala:50:9] wire [5:0] fixer_auto_anon_in_a_bits_source; // @[FIFOFixer.scala:50:9] wire [2:0] fixer_auto_anon_in_a_bits_size; // @[FIFOFixer.scala:50:9] wire [2:0] fixer_auto_anon_in_a_bits_param; // 
@[FIFOFixer.scala:50:9] wire [2:0] fixer_auto_anon_in_a_bits_opcode; // @[FIFOFixer.scala:50:9] wire mbus_clock_groups_auto_out_member_mbus_0_reset; // @[ClockGroup.scala:53:9] wire mbus_clock_groups_auto_out_member_mbus_0_clock; // @[ClockGroup.scala:53:9] wire _coupler_to_memory_controller_port_named_axi4_auto_tl_in_a_ready; // @[LazyScope.scala:98:27] wire _coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_valid; // @[LazyScope.scala:98:27] wire [2:0] _coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_opcode; // @[LazyScope.scala:98:27] wire [2:0] _coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_size; // @[LazyScope.scala:98:27] wire [5:0] _coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_source; // @[LazyScope.scala:98:27] wire _coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_denied; // @[LazyScope.scala:98:27] wire [63:0] _coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_data; // @[LazyScope.scala:98:27] wire _coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_corrupt; // @[LazyScope.scala:98:27] wire _picker_auto_in_1_a_ready; // @[ProbePicker.scala:69:28] wire _picker_auto_in_1_d_valid; // @[ProbePicker.scala:69:28] wire [2:0] _picker_auto_in_1_d_bits_opcode; // @[ProbePicker.scala:69:28] wire [1:0] _picker_auto_in_1_d_bits_param; // @[ProbePicker.scala:69:28] wire [2:0] _picker_auto_in_1_d_bits_size; // @[ProbePicker.scala:69:28] wire [5:0] _picker_auto_in_1_d_bits_source; // @[ProbePicker.scala:69:28] wire _picker_auto_in_1_d_bits_sink; // @[ProbePicker.scala:69:28] wire _picker_auto_in_1_d_bits_denied; // @[ProbePicker.scala:69:28] wire [63:0] _picker_auto_in_1_d_bits_data; // @[ProbePicker.scala:69:28] wire _picker_auto_in_1_d_bits_corrupt; // @[ProbePicker.scala:69:28] wire _picker_auto_in_0_a_ready; // @[ProbePicker.scala:69:28] wire _picker_auto_in_0_d_valid; // @[ProbePicker.scala:69:28] wire [2:0] _picker_auto_in_0_d_bits_opcode; // @[ProbePicker.scala:69:28] wire [2:0] _picker_auto_in_0_d_bits_size; // @[ProbePicker.scala:69:28] wire [5:0] _picker_auto_in_0_d_bits_source; // @[ProbePicker.scala:69:28] wire _picker_auto_in_0_d_bits_denied; // @[ProbePicker.scala:69:28] wire [63:0] _picker_auto_in_0_d_bits_data; // @[ProbePicker.scala:69:28] wire _picker_auto_in_0_d_bits_corrupt; // @[ProbePicker.scala:69:28] wire _picker_auto_out_0_a_valid; // @[ProbePicker.scala:69:28] wire [2:0] _picker_auto_out_0_a_bits_opcode; // @[ProbePicker.scala:69:28] wire [2:0] _picker_auto_out_0_a_bits_param; // @[ProbePicker.scala:69:28] wire [2:0] _picker_auto_out_0_a_bits_size; // @[ProbePicker.scala:69:28] wire [5:0] _picker_auto_out_0_a_bits_source; // @[ProbePicker.scala:69:28] wire [31:0] _picker_auto_out_0_a_bits_address; // @[ProbePicker.scala:69:28] wire [7:0] _picker_auto_out_0_a_bits_mask; // @[ProbePicker.scala:69:28] wire [63:0] _picker_auto_out_0_a_bits_data; // @[ProbePicker.scala:69:28] wire _picker_auto_out_0_a_bits_corrupt; // @[ProbePicker.scala:69:28] wire _picker_auto_out_0_d_ready; // @[ProbePicker.scala:69:28] wire _mbus_xbar_auto_anon_out_1_a_valid; // @[MemoryBus.scala:47:32] wire [2:0] _mbus_xbar_auto_anon_out_1_a_bits_opcode; // @[MemoryBus.scala:47:32] wire [2:0] _mbus_xbar_auto_anon_out_1_a_bits_param; // @[MemoryBus.scala:47:32] wire [2:0] _mbus_xbar_auto_anon_out_1_a_bits_size; // @[MemoryBus.scala:47:32] wire [5:0] _mbus_xbar_auto_anon_out_1_a_bits_source; // @[MemoryBus.scala:47:32] wire [27:0] _mbus_xbar_auto_anon_out_1_a_bits_address; // @[MemoryBus.scala:47:32] wire 
[7:0] _mbus_xbar_auto_anon_out_1_a_bits_mask; // @[MemoryBus.scala:47:32] wire [63:0] _mbus_xbar_auto_anon_out_1_a_bits_data; // @[MemoryBus.scala:47:32] wire _mbus_xbar_auto_anon_out_1_a_bits_corrupt; // @[MemoryBus.scala:47:32] wire _mbus_xbar_auto_anon_out_1_d_ready; // @[MemoryBus.scala:47:32] wire _mbus_xbar_auto_anon_out_0_a_valid; // @[MemoryBus.scala:47:32] wire [2:0] _mbus_xbar_auto_anon_out_0_a_bits_opcode; // @[MemoryBus.scala:47:32] wire [2:0] _mbus_xbar_auto_anon_out_0_a_bits_param; // @[MemoryBus.scala:47:32] wire [2:0] _mbus_xbar_auto_anon_out_0_a_bits_size; // @[MemoryBus.scala:47:32] wire [5:0] _mbus_xbar_auto_anon_out_0_a_bits_source; // @[MemoryBus.scala:47:32] wire [31:0] _mbus_xbar_auto_anon_out_0_a_bits_address; // @[MemoryBus.scala:47:32] wire [7:0] _mbus_xbar_auto_anon_out_0_a_bits_mask; // @[MemoryBus.scala:47:32] wire [63:0] _mbus_xbar_auto_anon_out_0_a_bits_data; // @[MemoryBus.scala:47:32] wire _mbus_xbar_auto_anon_out_0_a_bits_corrupt; // @[MemoryBus.scala:47:32] wire _mbus_xbar_auto_anon_out_0_d_ready; // @[MemoryBus.scala:47:32] wire auto_buffer_out_a_ready_0 = auto_buffer_out_a_ready; // @[ClockDomain.scala:14:9] wire auto_buffer_out_d_valid_0 = auto_buffer_out_d_valid; // @[ClockDomain.scala:14:9] wire [2:0] auto_buffer_out_d_bits_opcode_0 = auto_buffer_out_d_bits_opcode; // @[ClockDomain.scala:14:9] wire [1:0] auto_buffer_out_d_bits_param_0 = auto_buffer_out_d_bits_param; // @[ClockDomain.scala:14:9] wire [2:0] auto_buffer_out_d_bits_size_0 = auto_buffer_out_d_bits_size; // @[ClockDomain.scala:14:9] wire [5:0] auto_buffer_out_d_bits_source_0 = auto_buffer_out_d_bits_source; // @[ClockDomain.scala:14:9] wire auto_buffer_out_d_bits_sink_0 = auto_buffer_out_d_bits_sink; // @[ClockDomain.scala:14:9] wire auto_buffer_out_d_bits_denied_0 = auto_buffer_out_d_bits_denied; // @[ClockDomain.scala:14:9] wire [63:0] auto_buffer_out_d_bits_data_0 = auto_buffer_out_d_bits_data; // @[ClockDomain.scala:14:9] wire auto_buffer_out_d_bits_corrupt_0 = auto_buffer_out_d_bits_corrupt; // @[ClockDomain.scala:14:9] wire auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_ready_0 = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_ready; // @[ClockDomain.scala:14:9] wire auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_ready_0 = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_ready; // @[ClockDomain.scala:14:9] wire auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_valid_0 = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_valid; // @[ClockDomain.scala:14:9] wire [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_bits_id_0 = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_bits_id; // @[ClockDomain.scala:14:9] wire [1:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_bits_resp_0 = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_bits_resp; // @[ClockDomain.scala:14:9] wire auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_ready_0 = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_ready; // @[ClockDomain.scala:14:9] wire auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_valid_0 = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_valid; // @[ClockDomain.scala:14:9] wire [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_id_0 = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_id; // @[ClockDomain.scala:14:9] wire 
[63:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_data_0 = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_data; // @[ClockDomain.scala:14:9] wire [1:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_resp_0 = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_resp; // @[ClockDomain.scala:14:9] wire auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_last_0 = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_last; // @[ClockDomain.scala:14:9] wire auto_mbus_clock_groups_in_member_mbus_0_clock_0 = auto_mbus_clock_groups_in_member_mbus_0_clock; // @[ClockDomain.scala:14:9] wire auto_mbus_clock_groups_in_member_mbus_0_reset_0 = auto_mbus_clock_groups_in_member_mbus_0_reset; // @[ClockDomain.scala:14:9] wire auto_bus_xing_in_a_valid_0 = auto_bus_xing_in_a_valid; // @[ClockDomain.scala:14:9] wire [2:0] auto_bus_xing_in_a_bits_opcode_0 = auto_bus_xing_in_a_bits_opcode; // @[ClockDomain.scala:14:9] wire [2:0] auto_bus_xing_in_a_bits_param_0 = auto_bus_xing_in_a_bits_param; // @[ClockDomain.scala:14:9] wire [2:0] auto_bus_xing_in_a_bits_size_0 = auto_bus_xing_in_a_bits_size; // @[ClockDomain.scala:14:9] wire [5:0] auto_bus_xing_in_a_bits_source_0 = auto_bus_xing_in_a_bits_source; // @[ClockDomain.scala:14:9] wire [31:0] auto_bus_xing_in_a_bits_address_0 = auto_bus_xing_in_a_bits_address; // @[ClockDomain.scala:14:9] wire [7:0] auto_bus_xing_in_a_bits_mask_0 = auto_bus_xing_in_a_bits_mask; // @[ClockDomain.scala:14:9] wire [63:0] auto_bus_xing_in_a_bits_data_0 = auto_bus_xing_in_a_bits_data; // @[ClockDomain.scala:14:9] wire auto_bus_xing_in_a_bits_corrupt_0 = auto_bus_xing_in_a_bits_corrupt; // @[ClockDomain.scala:14:9] wire auto_bus_xing_in_d_ready_0 = auto_bus_xing_in_d_ready; // @[ClockDomain.scala:14:9] wire _childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25] wire mbus_clock_groups_childClock = 1'h0; // @[LazyModuleImp.scala:155:31] wire mbus_clock_groups_childReset = 1'h0; // @[LazyModuleImp.scala:158:31] wire mbus_clock_groups__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25] wire clockGroup_childClock = 1'h0; // @[LazyModuleImp.scala:155:31] wire clockGroup_childReset = 1'h0; // @[LazyModuleImp.scala:158:31] wire clockGroup__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25] wire broadcast_childClock = 1'h0; // @[LazyModuleImp.scala:155:31] wire broadcast_childReset = 1'h0; // @[LazyModuleImp.scala:158:31] wire broadcast__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25] wire fixer__flight_WIRE_0 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_1 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_2 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_3 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_4 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_5 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_6 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_7 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_8 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_9 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_10 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_11 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_12 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_13 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_14 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_15 = 1'h0; // @[FIFOFixer.scala:79:35] 
wire fixer__flight_WIRE_16 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_17 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_18 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_19 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_20 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_21 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_22 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_23 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_24 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_25 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_26 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_27 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_28 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_29 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_30 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_31 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_32 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_33 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_34 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_35 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_36 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_37 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_38 = 1'h0; // @[FIFOFixer.scala:79:35] wire fixer__flight_WIRE_39 = 1'h0; // @[FIFOFixer.scala:79:35] wire xbar_auto_anon_in_d_bits_sink = 1'h0; // @[Xbar.scala:74:9] wire xbar_auto_anon_out_d_bits_sink = 1'h0; // @[Xbar.scala:74:9] wire xbar_anonOut_d_bits_sink = 1'h0; // @[MixedNode.scala:542:17] wire xbar_anonIn_d_bits_sink = 1'h0; // @[MixedNode.scala:551:17] wire xbar_in_0_d_bits_sink = 1'h0; // @[Xbar.scala:159:18] wire xbar_out_0_d_bits_sink = 1'h0; // @[Xbar.scala:216:19] wire xbar__out_0_d_bits_sink_T = 1'h0; // @[Xbar.scala:251:53] wire xbar__addressC_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire xbar__addressC_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire xbar__addressC_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire xbar__addressC_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire xbar__addressC_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire xbar__addressC_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire xbar__requestBOI_WIRE_ready = 1'h0; // @[Bundles.scala:264:74] wire xbar__requestBOI_WIRE_valid = 1'h0; // @[Bundles.scala:264:74] wire xbar__requestBOI_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire xbar__requestBOI_WIRE_1_ready = 1'h0; // @[Bundles.scala:264:61] wire xbar__requestBOI_WIRE_1_valid = 1'h0; // @[Bundles.scala:264:61] wire xbar__requestBOI_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire xbar__requestBOI_T = 1'h0; // @[Parameters.scala:54:10] wire xbar__requestDOI_T = 1'h0; // @[Parameters.scala:54:10] wire xbar__requestEIO_WIRE_ready = 1'h0; // @[Bundles.scala:267:74] wire xbar__requestEIO_WIRE_valid = 1'h0; // @[Bundles.scala:267:74] wire xbar__requestEIO_WIRE_bits_sink = 1'h0; // @[Bundles.scala:267:74] wire xbar__requestEIO_WIRE_1_ready = 1'h0; // @[Bundles.scala:267:61] wire xbar__requestEIO_WIRE_1_valid = 1'h0; // @[Bundles.scala:267:61] wire xbar__requestEIO_WIRE_1_bits_sink = 1'h0; // @[Bundles.scala:267:61] wire xbar__beatsBO_WIRE_ready = 1'h0; // @[Bundles.scala:264:74] wire xbar__beatsBO_WIRE_valid = 1'h0; // @[Bundles.scala:264:74] wire xbar__beatsBO_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire 
xbar__beatsBO_WIRE_1_ready = 1'h0; // @[Bundles.scala:264:61] wire xbar__beatsBO_WIRE_1_valid = 1'h0; // @[Bundles.scala:264:61] wire xbar__beatsBO_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire xbar__beatsBO_opdata_T = 1'h0; // @[Edges.scala:97:37] wire xbar__beatsCI_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire xbar__beatsCI_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire xbar__beatsCI_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire xbar__beatsCI_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire xbar__beatsCI_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire xbar__beatsCI_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire xbar_beatsCI_opdata = 1'h0; // @[Edges.scala:102:36] wire xbar__beatsEI_WIRE_ready = 1'h0; // @[Bundles.scala:267:74] wire xbar__beatsEI_WIRE_valid = 1'h0; // @[Bundles.scala:267:74] wire xbar__beatsEI_WIRE_bits_sink = 1'h0; // @[Bundles.scala:267:74] wire xbar__beatsEI_WIRE_1_ready = 1'h0; // @[Bundles.scala:267:61] wire xbar__beatsEI_WIRE_1_valid = 1'h0; // @[Bundles.scala:267:61] wire xbar__beatsEI_WIRE_1_bits_sink = 1'h0; // @[Bundles.scala:267:61] wire xbar__portsBIO_WIRE_ready = 1'h0; // @[Bundles.scala:264:74] wire xbar__portsBIO_WIRE_valid = 1'h0; // @[Bundles.scala:264:74] wire xbar__portsBIO_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire xbar__portsBIO_WIRE_1_ready = 1'h0; // @[Bundles.scala:264:61] wire xbar__portsBIO_WIRE_1_valid = 1'h0; // @[Bundles.scala:264:61] wire xbar__portsBIO_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire xbar_portsBIO_filtered_0_ready = 1'h0; // @[Xbar.scala:352:24] wire xbar_portsBIO_filtered_0_valid = 1'h0; // @[Xbar.scala:352:24] wire xbar_portsBIO_filtered_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire xbar__portsBIO_filtered_0_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire xbar__portsCOI_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire xbar__portsCOI_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire xbar__portsCOI_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire xbar__portsCOI_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire xbar__portsCOI_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire xbar__portsCOI_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire xbar_portsCOI_filtered_0_ready = 1'h0; // @[Xbar.scala:352:24] wire xbar_portsCOI_filtered_0_valid = 1'h0; // @[Xbar.scala:352:24] wire xbar_portsCOI_filtered_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire xbar__portsCOI_filtered_0_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire xbar_portsDIO_filtered_0_bits_sink = 1'h0; // @[Xbar.scala:352:24] wire xbar__portsEOI_WIRE_ready = 1'h0; // @[Bundles.scala:267:74] wire xbar__portsEOI_WIRE_valid = 1'h0; // @[Bundles.scala:267:74] wire xbar__portsEOI_WIRE_bits_sink = 1'h0; // @[Bundles.scala:267:74] wire xbar__portsEOI_WIRE_1_ready = 1'h0; // @[Bundles.scala:267:61] wire xbar__portsEOI_WIRE_1_valid = 1'h0; // @[Bundles.scala:267:61] wire xbar__portsEOI_WIRE_1_bits_sink = 1'h0; // @[Bundles.scala:267:61] wire xbar_portsEOI_filtered_0_ready = 1'h0; // @[Xbar.scala:352:24] wire xbar_portsEOI_filtered_0_valid = 1'h0; // @[Xbar.scala:352:24] wire xbar_portsEOI_filtered_0_bits_sink = 1'h0; // @[Xbar.scala:352:24] wire xbar__portsEOI_filtered_0_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire [1:0] xbar_auto_anon_in_d_bits_param = 2'h0; // @[Xbar.scala:74:9] wire [1:0] xbar_auto_anon_out_d_bits_param = 2'h0; // @[Xbar.scala:74:9] wire [1:0] xbar_anonOut_d_bits_param = 2'h0; // @[MixedNode.scala:542:17] wire [1:0] xbar_anonIn_d_bits_param = 
2'h0; // @[MixedNode.scala:551:17] wire [1:0] xbar_in_0_d_bits_param = 2'h0; // @[Xbar.scala:159:18] wire [1:0] xbar_out_0_d_bits_param = 2'h0; // @[Xbar.scala:216:19] wire [1:0] xbar__requestBOI_WIRE_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] xbar__requestBOI_WIRE_1_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] xbar__beatsBO_WIRE_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] xbar__beatsBO_WIRE_1_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] xbar__portsBIO_WIRE_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] xbar__portsBIO_WIRE_1_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] xbar_portsBIO_filtered_0_bits_param = 2'h0; // @[Xbar.scala:352:24] wire [1:0] xbar_portsDIO_filtered_0_bits_param = 2'h0; // @[Xbar.scala:352:24] wire fixer__a_notFIFO_T_4 = 1'h1; // @[Parameters.scala:137:59] wire fixer__flight_T = 1'h1; // @[FIFOFixer.scala:80:65] wire fixer__anonOut_a_valid_T = 1'h1; // @[FIFOFixer.scala:95:50] wire fixer__anonOut_a_valid_T_1 = 1'h1; // @[FIFOFixer.scala:95:47] wire fixer__anonIn_a_ready_T = 1'h1; // @[FIFOFixer.scala:96:50] wire fixer__anonIn_a_ready_T_1 = 1'h1; // @[FIFOFixer.scala:96:47] wire xbar__requestAIO_T_4 = 1'h1; // @[Parameters.scala:137:59] wire xbar_requestAIO_0_0 = 1'h1; // @[Xbar.scala:307:107] wire xbar__requestCIO_T_4 = 1'h1; // @[Parameters.scala:137:59] wire xbar_requestCIO_0_0 = 1'h1; // @[Xbar.scala:308:107] wire xbar__requestBOI_T_1 = 1'h1; // @[Parameters.scala:54:32] wire xbar__requestBOI_T_2 = 1'h1; // @[Parameters.scala:56:32] wire xbar__requestBOI_T_3 = 1'h1; // @[Parameters.scala:54:67] wire xbar__requestBOI_T_4 = 1'h1; // @[Parameters.scala:57:20] wire xbar_requestBOI_0_0 = 1'h1; // @[Parameters.scala:56:48] wire xbar__requestDOI_T_1 = 1'h1; // @[Parameters.scala:54:32] wire xbar__requestDOI_T_2 = 1'h1; // @[Parameters.scala:56:32] wire xbar__requestDOI_T_3 = 1'h1; // @[Parameters.scala:54:67] wire xbar__requestDOI_T_4 = 1'h1; // @[Parameters.scala:57:20] wire xbar_requestDOI_0_0 = 1'h1; // @[Parameters.scala:56:48] wire xbar_beatsBO_opdata = 1'h1; // @[Edges.scala:97:28] wire xbar__portsAOI_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54] wire xbar__portsBIO_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54] wire xbar__portsCOI_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54] wire xbar__portsDIO_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54] wire xbar__portsEOI_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54] wire [63:0] xbar__addressC_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] xbar__addressC_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] xbar__requestBOI_WIRE_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] xbar__requestBOI_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] xbar__beatsBO_WIRE_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] xbar__beatsBO_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] xbar__beatsCI_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] xbar__beatsCI_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] xbar__portsBIO_WIRE_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] xbar__portsBIO_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] xbar_portsBIO_filtered_0_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [63:0] xbar__portsCOI_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] xbar__portsCOI_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] xbar_portsCOI_filtered_0_bits_data = 64'h0; // 
@[Xbar.scala:352:24] wire [31:0] xbar__addressC_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] xbar__addressC_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] xbar__requestCIO_T = 32'h0; // @[Parameters.scala:137:31] wire [31:0] xbar__requestBOI_WIRE_bits_address = 32'h0; // @[Bundles.scala:264:74] wire [31:0] xbar__requestBOI_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:264:61] wire [31:0] xbar__beatsBO_WIRE_bits_address = 32'h0; // @[Bundles.scala:264:74] wire [31:0] xbar__beatsBO_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:264:61] wire [31:0] xbar__beatsCI_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] xbar__beatsCI_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] xbar__portsBIO_WIRE_bits_address = 32'h0; // @[Bundles.scala:264:74] wire [31:0] xbar__portsBIO_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:264:61] wire [31:0] xbar_portsBIO_filtered_0_bits_address = 32'h0; // @[Xbar.scala:352:24] wire [31:0] xbar__portsCOI_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] xbar__portsCOI_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] xbar_portsCOI_filtered_0_bits_address = 32'h0; // @[Xbar.scala:352:24] wire [5:0] xbar__addressC_WIRE_bits_source = 6'h0; // @[Bundles.scala:265:74] wire [5:0] xbar__addressC_WIRE_1_bits_source = 6'h0; // @[Bundles.scala:265:61] wire [5:0] xbar__requestBOI_WIRE_bits_source = 6'h0; // @[Bundles.scala:264:74] wire [5:0] xbar__requestBOI_WIRE_1_bits_source = 6'h0; // @[Bundles.scala:264:61] wire [5:0] xbar__requestBOI_uncommonBits_T = 6'h0; // @[Parameters.scala:52:29] wire [5:0] xbar_requestBOI_uncommonBits = 6'h0; // @[Parameters.scala:52:56] wire [5:0] xbar__beatsBO_WIRE_bits_source = 6'h0; // @[Bundles.scala:264:74] wire [5:0] xbar__beatsBO_WIRE_1_bits_source = 6'h0; // @[Bundles.scala:264:61] wire [5:0] xbar__beatsBO_decode_T_2 = 6'h0; // @[package.scala:243:46] wire [5:0] xbar__beatsCI_WIRE_bits_source = 6'h0; // @[Bundles.scala:265:74] wire [5:0] xbar__beatsCI_WIRE_1_bits_source = 6'h0; // @[Bundles.scala:265:61] wire [5:0] xbar__beatsCI_decode_T_2 = 6'h0; // @[package.scala:243:46] wire [5:0] xbar__portsBIO_WIRE_bits_source = 6'h0; // @[Bundles.scala:264:74] wire [5:0] xbar__portsBIO_WIRE_1_bits_source = 6'h0; // @[Bundles.scala:264:61] wire [5:0] xbar_portsBIO_filtered_0_bits_source = 6'h0; // @[Xbar.scala:352:24] wire [5:0] xbar__portsCOI_WIRE_bits_source = 6'h0; // @[Bundles.scala:265:74] wire [5:0] xbar__portsCOI_WIRE_1_bits_source = 6'h0; // @[Bundles.scala:265:61] wire [5:0] xbar_portsCOI_filtered_0_bits_source = 6'h0; // @[Xbar.scala:352:24] wire [2:0] xbar__addressC_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] xbar__addressC_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] xbar__addressC_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] xbar__addressC_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] xbar__addressC_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] xbar__addressC_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] xbar__requestBOI_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] xbar__requestBOI_WIRE_bits_size = 3'h0; // @[Bundles.scala:264:74] wire [2:0] xbar__requestBOI_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] xbar__requestBOI_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:264:61] wire [2:0] xbar__beatsBO_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] xbar__beatsBO_WIRE_bits_size = 
3'h0; // @[Bundles.scala:264:74] wire [2:0] xbar__beatsBO_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] xbar__beatsBO_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:264:61] wire [2:0] xbar_beatsBO_decode = 3'h0; // @[Edges.scala:220:59] wire [2:0] xbar_beatsBO_0 = 3'h0; // @[Edges.scala:221:14] wire [2:0] xbar__beatsCI_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] xbar__beatsCI_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] xbar__beatsCI_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] xbar__beatsCI_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] xbar__beatsCI_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] xbar__beatsCI_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] xbar_beatsCI_decode = 3'h0; // @[Edges.scala:220:59] wire [2:0] xbar_beatsCI_0 = 3'h0; // @[Edges.scala:221:14] wire [2:0] xbar__portsBIO_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] xbar__portsBIO_WIRE_bits_size = 3'h0; // @[Bundles.scala:264:74] wire [2:0] xbar__portsBIO_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] xbar__portsBIO_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:264:61] wire [2:0] xbar_portsBIO_filtered_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] xbar_portsBIO_filtered_0_bits_size = 3'h0; // @[Xbar.scala:352:24] wire [2:0] xbar__portsCOI_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] xbar__portsCOI_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] xbar__portsCOI_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] xbar__portsCOI_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] xbar__portsCOI_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] xbar__portsCOI_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] xbar_portsCOI_filtered_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] xbar_portsCOI_filtered_0_bits_param = 3'h0; // @[Xbar.scala:352:24] wire [2:0] xbar_portsCOI_filtered_0_bits_size = 3'h0; // @[Xbar.scala:352:24] wire [7:0] xbar__requestBOI_WIRE_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] xbar__requestBOI_WIRE_1_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] xbar__beatsBO_WIRE_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] xbar__beatsBO_WIRE_1_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] xbar__portsBIO_WIRE_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] xbar__portsBIO_WIRE_1_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] xbar_portsBIO_filtered_0_bits_mask = 8'h0; // @[Xbar.scala:352:24] wire [5:0] xbar__beatsBO_decode_T_1 = 6'h3F; // @[package.scala:243:76] wire [5:0] xbar__beatsCI_decode_T_1 = 6'h3F; // @[package.scala:243:76] wire [12:0] xbar__beatsBO_decode_T = 13'h3F; // @[package.scala:243:71] wire [12:0] xbar__beatsCI_decode_T = 13'h3F; // @[package.scala:243:71] wire [39:0] fixer__allIDs_FIFOed_T = 40'hFFFFFFFFFF; // @[FIFOFixer.scala:127:48] wire [32:0] fixer__a_notFIFO_T_2 = 33'h0; // @[Parameters.scala:137:46] wire [32:0] fixer__a_notFIFO_T_3 = 33'h0; // @[Parameters.scala:137:46] wire [32:0] xbar__requestAIO_T_2 = 33'h0; // @[Parameters.scala:137:46] wire [32:0] xbar__requestAIO_T_3 = 33'h0; // @[Parameters.scala:137:46] wire [32:0] xbar__requestCIO_T_1 = 33'h0; // @[Parameters.scala:137:41] wire [32:0] xbar__requestCIO_T_2 = 33'h0; // @[Parameters.scala:137:46] wire [32:0] xbar__requestCIO_T_3 = 33'h0; // @[Parameters.scala:137:46] wire mbus_clock_groups_auto_in_member_mbus_0_clock = 
auto_mbus_clock_groups_in_member_mbus_0_clock_0; // @[ClockGroup.scala:53:9] wire mbus_clock_groups_auto_in_member_mbus_0_reset = auto_mbus_clock_groups_in_member_mbus_0_reset_0; // @[ClockGroup.scala:53:9] wire bus_xingIn_a_ready; // @[MixedNode.scala:551:17] wire bus_xingIn_a_valid = auto_bus_xing_in_a_valid_0; // @[ClockDomain.scala:14:9] wire [2:0] bus_xingIn_a_bits_opcode = auto_bus_xing_in_a_bits_opcode_0; // @[ClockDomain.scala:14:9] wire [2:0] bus_xingIn_a_bits_param = auto_bus_xing_in_a_bits_param_0; // @[ClockDomain.scala:14:9] wire [2:0] bus_xingIn_a_bits_size = auto_bus_xing_in_a_bits_size_0; // @[ClockDomain.scala:14:9] wire [5:0] bus_xingIn_a_bits_source = auto_bus_xing_in_a_bits_source_0; // @[ClockDomain.scala:14:9] wire [31:0] bus_xingIn_a_bits_address = auto_bus_xing_in_a_bits_address_0; // @[ClockDomain.scala:14:9] wire [7:0] bus_xingIn_a_bits_mask = auto_bus_xing_in_a_bits_mask_0; // @[ClockDomain.scala:14:9] wire [63:0] bus_xingIn_a_bits_data = auto_bus_xing_in_a_bits_data_0; // @[ClockDomain.scala:14:9] wire bus_xingIn_a_bits_corrupt = auto_bus_xing_in_a_bits_corrupt_0; // @[ClockDomain.scala:14:9] wire bus_xingIn_d_ready = auto_bus_xing_in_d_ready_0; // @[ClockDomain.scala:14:9] wire bus_xingIn_d_valid; // @[MixedNode.scala:551:17] wire [2:0] bus_xingIn_d_bits_opcode; // @[MixedNode.scala:551:17] wire [1:0] bus_xingIn_d_bits_param; // @[MixedNode.scala:551:17] wire [2:0] bus_xingIn_d_bits_size; // @[MixedNode.scala:551:17] wire [5:0] bus_xingIn_d_bits_source; // @[MixedNode.scala:551:17] wire bus_xingIn_d_bits_sink; // @[MixedNode.scala:551:17] wire bus_xingIn_d_bits_denied; // @[MixedNode.scala:551:17] wire [63:0] bus_xingIn_d_bits_data; // @[MixedNode.scala:551:17] wire bus_xingIn_d_bits_corrupt; // @[MixedNode.scala:551:17] wire [2:0] auto_buffer_out_a_bits_opcode_0; // @[ClockDomain.scala:14:9] wire [2:0] auto_buffer_out_a_bits_param_0; // @[ClockDomain.scala:14:9] wire [2:0] auto_buffer_out_a_bits_size_0; // @[ClockDomain.scala:14:9] wire [5:0] auto_buffer_out_a_bits_source_0; // @[ClockDomain.scala:14:9] wire [27:0] auto_buffer_out_a_bits_address_0; // @[ClockDomain.scala:14:9] wire [7:0] auto_buffer_out_a_bits_mask_0; // @[ClockDomain.scala:14:9] wire [63:0] auto_buffer_out_a_bits_data_0; // @[ClockDomain.scala:14:9] wire auto_buffer_out_a_bits_corrupt_0; // @[ClockDomain.scala:14:9] wire auto_buffer_out_a_valid_0; // @[ClockDomain.scala:14:9] wire auto_buffer_out_d_ready_0; // @[ClockDomain.scala:14:9] wire [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_id_0; // @[ClockDomain.scala:14:9] wire [31:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_addr_0; // @[ClockDomain.scala:14:9] wire [7:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_len_0; // @[ClockDomain.scala:14:9] wire [2:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_size_0; // @[ClockDomain.scala:14:9] wire [1:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_burst_0; // @[ClockDomain.scala:14:9] wire auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_lock_0; // @[ClockDomain.scala:14:9] wire [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_cache_0; // @[ClockDomain.scala:14:9] wire [2:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_prot_0; // @[ClockDomain.scala:14:9] wire [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_qos_0; // @[ClockDomain.scala:14:9] wire 
auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_valid_0; // @[ClockDomain.scala:14:9] wire [63:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_bits_data_0; // @[ClockDomain.scala:14:9] wire [7:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_bits_strb_0; // @[ClockDomain.scala:14:9] wire auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_bits_last_0; // @[ClockDomain.scala:14:9] wire auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_valid_0; // @[ClockDomain.scala:14:9] wire auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_ready_0; // @[ClockDomain.scala:14:9] wire [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_id_0; // @[ClockDomain.scala:14:9] wire [31:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_addr_0; // @[ClockDomain.scala:14:9] wire [7:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_len_0; // @[ClockDomain.scala:14:9] wire [2:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_size_0; // @[ClockDomain.scala:14:9] wire [1:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_burst_0; // @[ClockDomain.scala:14:9] wire auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_lock_0; // @[ClockDomain.scala:14:9] wire [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_cache_0; // @[ClockDomain.scala:14:9] wire [2:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_prot_0; // @[ClockDomain.scala:14:9] wire [3:0] auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_qos_0; // @[ClockDomain.scala:14:9] wire auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_valid_0; // @[ClockDomain.scala:14:9] wire auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_ready_0; // @[ClockDomain.scala:14:9] wire auto_fixedClockNode_anon_out_1_clock_0; // @[ClockDomain.scala:14:9] wire auto_fixedClockNode_anon_out_1_reset_0; // @[ClockDomain.scala:14:9] wire auto_fixedClockNode_anon_out_0_clock_0; // @[ClockDomain.scala:14:9] wire auto_fixedClockNode_anon_out_0_reset_0; // @[ClockDomain.scala:14:9] wire auto_bus_xing_in_a_ready_0; // @[ClockDomain.scala:14:9] wire [2:0] auto_bus_xing_in_d_bits_opcode_0; // @[ClockDomain.scala:14:9] wire [1:0] auto_bus_xing_in_d_bits_param_0; // @[ClockDomain.scala:14:9] wire [2:0] auto_bus_xing_in_d_bits_size_0; // @[ClockDomain.scala:14:9] wire [5:0] auto_bus_xing_in_d_bits_source_0; // @[ClockDomain.scala:14:9] wire auto_bus_xing_in_d_bits_sink_0; // @[ClockDomain.scala:14:9] wire auto_bus_xing_in_d_bits_denied_0; // @[ClockDomain.scala:14:9] wire [63:0] auto_bus_xing_in_d_bits_data_0; // @[ClockDomain.scala:14:9] wire auto_bus_xing_in_d_bits_corrupt_0; // @[ClockDomain.scala:14:9] wire auto_bus_xing_in_d_valid_0; // @[ClockDomain.scala:14:9] wire clockSinkNodeIn_clock; // @[MixedNode.scala:551:17] wire clockSinkNodeIn_reset; // @[MixedNode.scala:551:17] wire childClock; // @[LazyModuleImp.scala:155:31] wire childReset; // @[LazyModuleImp.scala:158:31] wire mbus_clock_groups_nodeIn_member_mbus_0_clock = mbus_clock_groups_auto_in_member_mbus_0_clock; // @[ClockGroup.scala:53:9] wire mbus_clock_groups_nodeOut_member_mbus_0_clock; // @[MixedNode.scala:542:17] wire mbus_clock_groups_nodeIn_member_mbus_0_reset = mbus_clock_groups_auto_in_member_mbus_0_reset; // @[ClockGroup.scala:53:9] wire mbus_clock_groups_nodeOut_member_mbus_0_reset; // 
@[MixedNode.scala:542:17] wire clockGroup_auto_in_member_mbus_0_clock = mbus_clock_groups_auto_out_member_mbus_0_clock; // @[ClockGroup.scala:24:9, :53:9] wire clockGroup_auto_in_member_mbus_0_reset = mbus_clock_groups_auto_out_member_mbus_0_reset; // @[ClockGroup.scala:24:9, :53:9] assign mbus_clock_groups_auto_out_member_mbus_0_clock = mbus_clock_groups_nodeOut_member_mbus_0_clock; // @[ClockGroup.scala:53:9] assign mbus_clock_groups_auto_out_member_mbus_0_reset = mbus_clock_groups_nodeOut_member_mbus_0_reset; // @[ClockGroup.scala:53:9] assign mbus_clock_groups_nodeOut_member_mbus_0_clock = mbus_clock_groups_nodeIn_member_mbus_0_clock; // @[MixedNode.scala:542:17, :551:17] assign mbus_clock_groups_nodeOut_member_mbus_0_reset = mbus_clock_groups_nodeIn_member_mbus_0_reset; // @[MixedNode.scala:542:17, :551:17] wire clockGroup_nodeIn_member_mbus_0_clock = clockGroup_auto_in_member_mbus_0_clock; // @[ClockGroup.scala:24:9] wire clockGroup_nodeOut_clock; // @[MixedNode.scala:542:17] wire clockGroup_nodeIn_member_mbus_0_reset = clockGroup_auto_in_member_mbus_0_reset; // @[ClockGroup.scala:24:9] wire clockGroup_nodeOut_reset; // @[MixedNode.scala:542:17] wire clockGroup_auto_out_clock; // @[ClockGroup.scala:24:9] wire clockGroup_auto_out_reset; // @[ClockGroup.scala:24:9] assign clockGroup_auto_out_clock = clockGroup_nodeOut_clock; // @[ClockGroup.scala:24:9] assign clockGroup_auto_out_reset = clockGroup_nodeOut_reset; // @[ClockGroup.scala:24:9] assign clockGroup_nodeOut_clock = clockGroup_nodeIn_member_mbus_0_clock; // @[MixedNode.scala:542:17, :551:17] assign clockGroup_nodeOut_reset = clockGroup_nodeIn_member_mbus_0_reset; // @[MixedNode.scala:542:17, :551:17] wire fixer_anonIn_a_ready; // @[MixedNode.scala:551:17] wire buffer_auto_out_a_ready = fixer_auto_anon_in_a_ready; // @[FIFOFixer.scala:50:9] wire buffer_auto_out_a_valid; // @[Buffer.scala:40:9] wire fixer_anonIn_a_valid = fixer_auto_anon_in_a_valid; // @[FIFOFixer.scala:50:9] wire [2:0] buffer_auto_out_a_bits_opcode; // @[Buffer.scala:40:9] wire [2:0] fixer_anonIn_a_bits_opcode = fixer_auto_anon_in_a_bits_opcode; // @[FIFOFixer.scala:50:9] wire [2:0] buffer_auto_out_a_bits_param; // @[Buffer.scala:40:9] wire [2:0] fixer_anonIn_a_bits_param = fixer_auto_anon_in_a_bits_param; // @[FIFOFixer.scala:50:9] wire [2:0] buffer_auto_out_a_bits_size; // @[Buffer.scala:40:9] wire [2:0] fixer_anonIn_a_bits_size = fixer_auto_anon_in_a_bits_size; // @[FIFOFixer.scala:50:9] wire [5:0] buffer_auto_out_a_bits_source; // @[Buffer.scala:40:9] wire [5:0] fixer_anonIn_a_bits_source = fixer_auto_anon_in_a_bits_source; // @[FIFOFixer.scala:50:9] wire [31:0] buffer_auto_out_a_bits_address; // @[Buffer.scala:40:9] wire [31:0] fixer_anonIn_a_bits_address = fixer_auto_anon_in_a_bits_address; // @[FIFOFixer.scala:50:9] wire [7:0] buffer_auto_out_a_bits_mask; // @[Buffer.scala:40:9] wire [7:0] fixer_anonIn_a_bits_mask = fixer_auto_anon_in_a_bits_mask; // @[FIFOFixer.scala:50:9] wire [63:0] buffer_auto_out_a_bits_data; // @[Buffer.scala:40:9] wire [63:0] fixer_anonIn_a_bits_data = fixer_auto_anon_in_a_bits_data; // @[FIFOFixer.scala:50:9] wire buffer_auto_out_a_bits_corrupt; // @[Buffer.scala:40:9] wire fixer_anonIn_a_bits_corrupt = fixer_auto_anon_in_a_bits_corrupt; // @[FIFOFixer.scala:50:9] wire buffer_auto_out_d_ready; // @[Buffer.scala:40:9] wire fixer_anonIn_d_ready = fixer_auto_anon_in_d_ready; // @[FIFOFixer.scala:50:9] wire fixer_anonIn_d_valid; // @[MixedNode.scala:551:17] wire [2:0] fixer_anonIn_d_bits_opcode; // @[MixedNode.scala:551:17] wire 
buffer_auto_out_d_valid = fixer_auto_anon_in_d_valid; // @[FIFOFixer.scala:50:9] wire [1:0] fixer_anonIn_d_bits_param; // @[MixedNode.scala:551:17] wire [2:0] buffer_auto_out_d_bits_opcode = fixer_auto_anon_in_d_bits_opcode; // @[FIFOFixer.scala:50:9] wire [2:0] fixer_anonIn_d_bits_size; // @[MixedNode.scala:551:17] wire [1:0] buffer_auto_out_d_bits_param = fixer_auto_anon_in_d_bits_param; // @[FIFOFixer.scala:50:9] wire [5:0] fixer_anonIn_d_bits_source; // @[MixedNode.scala:551:17] wire [2:0] buffer_auto_out_d_bits_size = fixer_auto_anon_in_d_bits_size; // @[FIFOFixer.scala:50:9] wire fixer_anonIn_d_bits_sink; // @[MixedNode.scala:551:17] wire [5:0] buffer_auto_out_d_bits_source = fixer_auto_anon_in_d_bits_source; // @[FIFOFixer.scala:50:9] wire fixer_anonIn_d_bits_denied; // @[MixedNode.scala:551:17] wire buffer_auto_out_d_bits_sink = fixer_auto_anon_in_d_bits_sink; // @[FIFOFixer.scala:50:9] wire [63:0] fixer_anonIn_d_bits_data; // @[MixedNode.scala:551:17] wire buffer_auto_out_d_bits_denied = fixer_auto_anon_in_d_bits_denied; // @[FIFOFixer.scala:50:9] wire fixer_anonIn_d_bits_corrupt; // @[MixedNode.scala:551:17] wire [63:0] buffer_auto_out_d_bits_data = fixer_auto_anon_in_d_bits_data; // @[FIFOFixer.scala:50:9] wire buffer_auto_out_d_bits_corrupt = fixer_auto_anon_in_d_bits_corrupt; // @[FIFOFixer.scala:50:9] wire fixer_anonOut_a_ready = fixer_auto_anon_out_a_ready; // @[FIFOFixer.scala:50:9] wire fixer_anonOut_a_valid; // @[MixedNode.scala:542:17] wire [2:0] fixer_anonOut_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] fixer_anonOut_a_bits_param; // @[MixedNode.scala:542:17] wire [2:0] fixer_anonOut_a_bits_size; // @[MixedNode.scala:542:17] wire [5:0] fixer_anonOut_a_bits_source; // @[MixedNode.scala:542:17] wire [31:0] fixer_anonOut_a_bits_address; // @[MixedNode.scala:542:17] wire [7:0] fixer_anonOut_a_bits_mask; // @[MixedNode.scala:542:17] wire [63:0] fixer_anonOut_a_bits_data; // @[MixedNode.scala:542:17] wire fixer_anonOut_a_bits_corrupt; // @[MixedNode.scala:542:17] wire fixer_anonOut_d_ready; // @[MixedNode.scala:542:17] wire fixer_anonOut_d_valid = fixer_auto_anon_out_d_valid; // @[FIFOFixer.scala:50:9] wire [2:0] fixer_anonOut_d_bits_opcode = fixer_auto_anon_out_d_bits_opcode; // @[FIFOFixer.scala:50:9] wire [1:0] fixer_anonOut_d_bits_param = fixer_auto_anon_out_d_bits_param; // @[FIFOFixer.scala:50:9] wire [2:0] fixer_anonOut_d_bits_size = fixer_auto_anon_out_d_bits_size; // @[FIFOFixer.scala:50:9] wire [5:0] fixer_anonOut_d_bits_source = fixer_auto_anon_out_d_bits_source; // @[FIFOFixer.scala:50:9] wire fixer_anonOut_d_bits_sink = fixer_auto_anon_out_d_bits_sink; // @[FIFOFixer.scala:50:9] wire fixer_anonOut_d_bits_denied = fixer_auto_anon_out_d_bits_denied; // @[FIFOFixer.scala:50:9] wire [63:0] fixer_anonOut_d_bits_data = fixer_auto_anon_out_d_bits_data; // @[FIFOFixer.scala:50:9] wire fixer_anonOut_d_bits_corrupt = fixer_auto_anon_out_d_bits_corrupt; // @[FIFOFixer.scala:50:9] wire [2:0] fixer_auto_anon_out_a_bits_opcode; // @[FIFOFixer.scala:50:9] wire [2:0] fixer_auto_anon_out_a_bits_param; // @[FIFOFixer.scala:50:9] wire [2:0] fixer_auto_anon_out_a_bits_size; // @[FIFOFixer.scala:50:9] wire [5:0] fixer_auto_anon_out_a_bits_source; // @[FIFOFixer.scala:50:9] wire [31:0] fixer_auto_anon_out_a_bits_address; // @[FIFOFixer.scala:50:9] wire [7:0] fixer_auto_anon_out_a_bits_mask; // @[FIFOFixer.scala:50:9] wire [63:0] fixer_auto_anon_out_a_bits_data; // @[FIFOFixer.scala:50:9] wire fixer_auto_anon_out_a_bits_corrupt; // @[FIFOFixer.scala:50:9] wire 
fixer_auto_anon_out_a_valid; // @[FIFOFixer.scala:50:9] wire fixer_auto_anon_out_d_ready; // @[FIFOFixer.scala:50:9] wire fixer__anonOut_a_valid_T_2; // @[FIFOFixer.scala:95:33] wire fixer__anonIn_a_ready_T_2 = fixer_anonOut_a_ready; // @[FIFOFixer.scala:96:33] assign fixer_auto_anon_out_a_valid = fixer_anonOut_a_valid; // @[FIFOFixer.scala:50:9] assign fixer_auto_anon_out_a_bits_opcode = fixer_anonOut_a_bits_opcode; // @[FIFOFixer.scala:50:9] assign fixer_auto_anon_out_a_bits_param = fixer_anonOut_a_bits_param; // @[FIFOFixer.scala:50:9] assign fixer_auto_anon_out_a_bits_size = fixer_anonOut_a_bits_size; // @[FIFOFixer.scala:50:9] assign fixer_auto_anon_out_a_bits_source = fixer_anonOut_a_bits_source; // @[FIFOFixer.scala:50:9] assign fixer_auto_anon_out_a_bits_address = fixer_anonOut_a_bits_address; // @[FIFOFixer.scala:50:9] assign fixer_auto_anon_out_a_bits_mask = fixer_anonOut_a_bits_mask; // @[FIFOFixer.scala:50:9] assign fixer_auto_anon_out_a_bits_data = fixer_anonOut_a_bits_data; // @[FIFOFixer.scala:50:9] assign fixer_auto_anon_out_a_bits_corrupt = fixer_anonOut_a_bits_corrupt; // @[FIFOFixer.scala:50:9] assign fixer_auto_anon_out_d_ready = fixer_anonOut_d_ready; // @[FIFOFixer.scala:50:9] assign fixer_anonIn_d_valid = fixer_anonOut_d_valid; // @[MixedNode.scala:542:17, :551:17] assign fixer_anonIn_d_bits_opcode = fixer_anonOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign fixer_anonIn_d_bits_param = fixer_anonOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17] assign fixer_anonIn_d_bits_size = fixer_anonOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17] assign fixer_anonIn_d_bits_source = fixer_anonOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17] assign fixer_anonIn_d_bits_sink = fixer_anonOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17] assign fixer_anonIn_d_bits_denied = fixer_anonOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17] assign fixer_anonIn_d_bits_data = fixer_anonOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17] assign fixer_anonIn_d_bits_corrupt = fixer_anonOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] assign fixer_auto_anon_in_a_ready = fixer_anonIn_a_ready; // @[FIFOFixer.scala:50:9] assign fixer__anonOut_a_valid_T_2 = fixer_anonIn_a_valid; // @[FIFOFixer.scala:95:33] assign fixer_anonOut_a_bits_opcode = fixer_anonIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign fixer_anonOut_a_bits_param = fixer_anonIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17] assign fixer_anonOut_a_bits_size = fixer_anonIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17] assign fixer_anonOut_a_bits_source = fixer_anonIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17] assign fixer_anonOut_a_bits_address = fixer_anonIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17] wire [31:0] fixer__a_notFIFO_T = fixer_anonIn_a_bits_address; // @[Parameters.scala:137:31] wire [31:0] fixer__a_id_T = fixer_anonIn_a_bits_address; // @[Parameters.scala:137:31] assign fixer_anonOut_a_bits_mask = fixer_anonIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17] assign fixer_anonOut_a_bits_data = fixer_anonIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17] assign fixer_anonOut_a_bits_corrupt = fixer_anonIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] assign fixer_anonOut_d_ready = fixer_anonIn_d_ready; // @[MixedNode.scala:542:17, :551:17] assign fixer_auto_anon_in_d_valid = fixer_anonIn_d_valid; // @[FIFOFixer.scala:50:9] assign fixer_auto_anon_in_d_bits_opcode = fixer_anonIn_d_bits_opcode; // 
@[FIFOFixer.scala:50:9] assign fixer_auto_anon_in_d_bits_param = fixer_anonIn_d_bits_param; // @[FIFOFixer.scala:50:9] assign fixer_auto_anon_in_d_bits_size = fixer_anonIn_d_bits_size; // @[FIFOFixer.scala:50:9] assign fixer_auto_anon_in_d_bits_source = fixer_anonIn_d_bits_source; // @[FIFOFixer.scala:50:9] assign fixer_auto_anon_in_d_bits_sink = fixer_anonIn_d_bits_sink; // @[FIFOFixer.scala:50:9] assign fixer_auto_anon_in_d_bits_denied = fixer_anonIn_d_bits_denied; // @[FIFOFixer.scala:50:9] assign fixer_auto_anon_in_d_bits_data = fixer_anonIn_d_bits_data; // @[FIFOFixer.scala:50:9] assign fixer_auto_anon_in_d_bits_corrupt = fixer_anonIn_d_bits_corrupt; // @[FIFOFixer.scala:50:9] wire [32:0] fixer__a_notFIFO_T_1 = {1'h0, fixer__a_notFIFO_T}; // @[Parameters.scala:137:{31,41}] wire [32:0] fixer__a_id_T_1 = {1'h0, fixer__a_id_T}; // @[Parameters.scala:137:{31,41}] wire [32:0] fixer__a_id_T_2 = fixer__a_id_T_1 & 33'h80000000; // @[Parameters.scala:137:{41,46}] wire [32:0] fixer__a_id_T_3 = fixer__a_id_T_2; // @[Parameters.scala:137:46] wire fixer__a_id_T_4 = fixer__a_id_T_3 == 33'h0; // @[Parameters.scala:137:{46,59}] wire [31:0] fixer__a_id_T_5 = fixer_anonIn_a_bits_address ^ 32'h80000000; // @[Parameters.scala:137:31] wire [32:0] fixer__a_id_T_6 = {1'h0, fixer__a_id_T_5}; // @[Parameters.scala:137:{31,41}] wire [32:0] fixer__a_id_T_7 = fixer__a_id_T_6 & 33'h80000000; // @[Parameters.scala:137:{41,46}] wire [32:0] fixer__a_id_T_8 = fixer__a_id_T_7; // @[Parameters.scala:137:46] wire fixer__a_id_T_9 = fixer__a_id_T_8 == 33'h0; // @[Parameters.scala:137:{46,59}] wire fixer__a_id_T_11 = fixer__a_id_T_9; // @[Mux.scala:30:73] wire [1:0] fixer__a_id_T_10 = {fixer__a_id_T_4, 1'h0}; // @[Mux.scala:30:73] wire [1:0] fixer__a_id_T_12 = {fixer__a_id_T_10[1], fixer__a_id_T_10[0] | fixer__a_id_T_11}; // @[Mux.scala:30:73] wire [1:0] fixer_a_id = fixer__a_id_T_12; // @[Mux.scala:30:73] wire fixer_a_noDomain = fixer_a_id == 2'h0; // @[Mux.scala:30:73] wire fixer__a_first_T = fixer_anonIn_a_ready & fixer_anonIn_a_valid; // @[Decoupled.scala:51:35] wire [12:0] fixer__a_first_beats1_decode_T = 13'h3F << fixer_anonIn_a_bits_size; // @[package.scala:243:71] wire [5:0] fixer__a_first_beats1_decode_T_1 = fixer__a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] fixer__a_first_beats1_decode_T_2 = ~fixer__a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [2:0] fixer_a_first_beats1_decode = fixer__a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46] wire fixer__a_first_beats1_opdata_T = fixer_anonIn_a_bits_opcode[2]; // @[Edges.scala:92:37] wire fixer_a_first_beats1_opdata = ~fixer__a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}] wire [2:0] fixer_a_first_beats1 = fixer_a_first_beats1_opdata ? 
fixer_a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [2:0] fixer_a_first_counter; // @[Edges.scala:229:27] wire [3:0] fixer__a_first_counter1_T = {1'h0, fixer_a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] fixer_a_first_counter1 = fixer__a_first_counter1_T[2:0]; // @[Edges.scala:230:28] wire fixer_a_first = fixer_a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25] wire fixer__a_first_last_T = fixer_a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25] wire fixer__a_first_last_T_1 = fixer_a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire fixer_a_first_last = fixer__a_first_last_T | fixer__a_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire fixer_a_first_done = fixer_a_first_last & fixer__a_first_T; // @[Decoupled.scala:51:35] wire [2:0] fixer__a_first_count_T = ~fixer_a_first_counter1; // @[Edges.scala:230:28, :234:27] wire [2:0] fixer_a_first_count = fixer_a_first_beats1 & fixer__a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] fixer__a_first_counter_T = fixer_a_first ? fixer_a_first_beats1 : fixer_a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire fixer__d_first_T = fixer_anonOut_d_ready & fixer_anonOut_d_valid; // @[Decoupled.scala:51:35] wire [12:0] fixer__d_first_beats1_decode_T = 13'h3F << fixer_anonOut_d_bits_size; // @[package.scala:243:71] wire [5:0] fixer__d_first_beats1_decode_T_1 = fixer__d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] fixer__d_first_beats1_decode_T_2 = ~fixer__d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [2:0] fixer_d_first_beats1_decode = fixer__d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46] wire fixer_d_first_beats1_opdata = fixer_anonOut_d_bits_opcode[0]; // @[Edges.scala:106:36] wire [2:0] fixer_d_first_beats1 = fixer_d_first_beats1_opdata ? fixer_d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] fixer_d_first_counter; // @[Edges.scala:229:27] wire [3:0] fixer__d_first_counter1_T = {1'h0, fixer_d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] fixer_d_first_counter1 = fixer__d_first_counter1_T[2:0]; // @[Edges.scala:230:28] wire fixer_d_first_first = fixer_d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25] wire fixer__d_first_last_T = fixer_d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25] wire fixer__d_first_last_T_1 = fixer_d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire fixer_d_first_last = fixer__d_first_last_T | fixer__d_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire fixer_d_first_done = fixer_d_first_last & fixer__d_first_T; // @[Decoupled.scala:51:35] wire [2:0] fixer__d_first_count_T = ~fixer_d_first_counter1; // @[Edges.scala:230:28, :234:27] wire [2:0] fixer_d_first_count = fixer_d_first_beats1 & fixer__d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] fixer__d_first_counter_T = fixer_d_first_first ? 
fixer_d_first_beats1 : fixer_d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire fixer__d_first_T_1 = fixer_anonOut_d_bits_opcode != 3'h6; // @[FIFOFixer.scala:75:63] wire fixer_d_first = fixer_d_first_first & fixer__d_first_T_1; // @[FIFOFixer.scala:75:{42,63}] reg fixer_flight_0; // @[FIFOFixer.scala:79:27] reg fixer_flight_1; // @[FIFOFixer.scala:79:27] reg fixer_flight_2; // @[FIFOFixer.scala:79:27] reg fixer_flight_3; // @[FIFOFixer.scala:79:27] reg fixer_flight_4; // @[FIFOFixer.scala:79:27] reg fixer_flight_5; // @[FIFOFixer.scala:79:27] reg fixer_flight_6; // @[FIFOFixer.scala:79:27] reg fixer_flight_7; // @[FIFOFixer.scala:79:27] reg fixer_flight_8; // @[FIFOFixer.scala:79:27] reg fixer_flight_9; // @[FIFOFixer.scala:79:27] reg fixer_flight_10; // @[FIFOFixer.scala:79:27] reg fixer_flight_11; // @[FIFOFixer.scala:79:27] reg fixer_flight_12; // @[FIFOFixer.scala:79:27] reg fixer_flight_13; // @[FIFOFixer.scala:79:27] reg fixer_flight_14; // @[FIFOFixer.scala:79:27] reg fixer_flight_15; // @[FIFOFixer.scala:79:27] reg fixer_flight_16; // @[FIFOFixer.scala:79:27] reg fixer_flight_17; // @[FIFOFixer.scala:79:27] reg fixer_flight_18; // @[FIFOFixer.scala:79:27] reg fixer_flight_19; // @[FIFOFixer.scala:79:27] reg fixer_flight_20; // @[FIFOFixer.scala:79:27] reg fixer_flight_21; // @[FIFOFixer.scala:79:27] reg fixer_flight_22; // @[FIFOFixer.scala:79:27] reg fixer_flight_23; // @[FIFOFixer.scala:79:27] reg fixer_flight_24; // @[FIFOFixer.scala:79:27] reg fixer_flight_25; // @[FIFOFixer.scala:79:27] reg fixer_flight_26; // @[FIFOFixer.scala:79:27] reg fixer_flight_27; // @[FIFOFixer.scala:79:27] reg fixer_flight_28; // @[FIFOFixer.scala:79:27] reg fixer_flight_29; // @[FIFOFixer.scala:79:27] reg fixer_flight_30; // @[FIFOFixer.scala:79:27] reg fixer_flight_31; // @[FIFOFixer.scala:79:27] reg fixer_flight_32; // @[FIFOFixer.scala:79:27] reg fixer_flight_33; // @[FIFOFixer.scala:79:27] reg fixer_flight_34; // @[FIFOFixer.scala:79:27] reg fixer_flight_35; // @[FIFOFixer.scala:79:27] reg fixer_flight_36; // @[FIFOFixer.scala:79:27] reg fixer_flight_37; // @[FIFOFixer.scala:79:27] reg fixer_flight_38; // @[FIFOFixer.scala:79:27] reg fixer_flight_39; // @[FIFOFixer.scala:79:27] wire fixer__T_2 = fixer_anonIn_d_ready & fixer_anonIn_d_valid; // @[Decoupled.scala:51:35] assign fixer_anonOut_a_valid = fixer__anonOut_a_valid_T_2; // @[FIFOFixer.scala:95:33] assign fixer_anonIn_a_ready = fixer__anonIn_a_ready_T_2; // @[FIFOFixer.scala:96:33] reg [39:0] fixer_SourceIdFIFOed; // @[FIFOFixer.scala:115:35] wire [39:0] fixer_SourceIdSet; // @[FIFOFixer.scala:116:36] wire [39:0] fixer_SourceIdClear; // @[FIFOFixer.scala:117:38] wire [63:0] fixer__SourceIdSet_T = 64'h1 << fixer_anonIn_a_bits_source; // @[OneHot.scala:58:35] assign fixer_SourceIdSet = fixer_a_first & fixer__a_first_T ? fixer__SourceIdSet_T[39:0] : 40'h0; // @[OneHot.scala:58:35] wire [63:0] fixer__SourceIdClear_T = 64'h1 << fixer_anonIn_d_bits_source; // @[OneHot.scala:58:35] assign fixer_SourceIdClear = fixer_d_first & fixer__T_2 ? 
fixer__SourceIdClear_T[39:0] : 40'h0; // @[OneHot.scala:58:35] wire [39:0] fixer__SourceIdFIFOed_T = fixer_SourceIdFIFOed | fixer_SourceIdSet; // @[FIFOFixer.scala:115:35, :116:36, :126:40] wire fixer_allIDs_FIFOed = &fixer_SourceIdFIFOed; // @[FIFOFixer.scala:115:35, :127:41] wire buffer_nodeIn_a_ready; // @[MixedNode.scala:551:17] wire bus_xingOut_a_ready = buffer_auto_in_a_ready; // @[Buffer.scala:40:9] wire bus_xingOut_a_valid; // @[MixedNode.scala:542:17] wire buffer_nodeIn_a_valid = buffer_auto_in_a_valid; // @[Buffer.scala:40:9] wire [2:0] bus_xingOut_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] buffer_nodeIn_a_bits_opcode = buffer_auto_in_a_bits_opcode; // @[Buffer.scala:40:9] wire [2:0] bus_xingOut_a_bits_param; // @[MixedNode.scala:542:17] wire [2:0] buffer_nodeIn_a_bits_param = buffer_auto_in_a_bits_param; // @[Buffer.scala:40:9] wire [2:0] bus_xingOut_a_bits_size; // @[MixedNode.scala:542:17] wire [2:0] buffer_nodeIn_a_bits_size = buffer_auto_in_a_bits_size; // @[Buffer.scala:40:9] wire [5:0] bus_xingOut_a_bits_source; // @[MixedNode.scala:542:17] wire [5:0] buffer_nodeIn_a_bits_source = buffer_auto_in_a_bits_source; // @[Buffer.scala:40:9] wire [31:0] bus_xingOut_a_bits_address; // @[MixedNode.scala:542:17] wire [31:0] buffer_nodeIn_a_bits_address = buffer_auto_in_a_bits_address; // @[Buffer.scala:40:9] wire [7:0] bus_xingOut_a_bits_mask; // @[MixedNode.scala:542:17] wire [7:0] buffer_nodeIn_a_bits_mask = buffer_auto_in_a_bits_mask; // @[Buffer.scala:40:9] wire [63:0] bus_xingOut_a_bits_data; // @[MixedNode.scala:542:17] wire [63:0] buffer_nodeIn_a_bits_data = buffer_auto_in_a_bits_data; // @[Buffer.scala:40:9] wire bus_xingOut_a_bits_corrupt; // @[MixedNode.scala:542:17] wire buffer_nodeIn_a_bits_corrupt = buffer_auto_in_a_bits_corrupt; // @[Buffer.scala:40:9] wire bus_xingOut_d_ready; // @[MixedNode.scala:542:17] wire buffer_nodeIn_d_ready = buffer_auto_in_d_ready; // @[Buffer.scala:40:9] wire buffer_nodeIn_d_valid; // @[MixedNode.scala:551:17] wire [2:0] buffer_nodeIn_d_bits_opcode; // @[MixedNode.scala:551:17] wire bus_xingOut_d_valid = buffer_auto_in_d_valid; // @[Buffer.scala:40:9] wire [1:0] buffer_nodeIn_d_bits_param; // @[MixedNode.scala:551:17] wire [2:0] bus_xingOut_d_bits_opcode = buffer_auto_in_d_bits_opcode; // @[Buffer.scala:40:9] wire [2:0] buffer_nodeIn_d_bits_size; // @[MixedNode.scala:551:17] wire [1:0] bus_xingOut_d_bits_param = buffer_auto_in_d_bits_param; // @[Buffer.scala:40:9] wire [5:0] buffer_nodeIn_d_bits_source; // @[MixedNode.scala:551:17] wire [2:0] bus_xingOut_d_bits_size = buffer_auto_in_d_bits_size; // @[Buffer.scala:40:9] wire buffer_nodeIn_d_bits_sink; // @[MixedNode.scala:551:17] wire [5:0] bus_xingOut_d_bits_source = buffer_auto_in_d_bits_source; // @[Buffer.scala:40:9] wire buffer_nodeIn_d_bits_denied; // @[MixedNode.scala:551:17] wire bus_xingOut_d_bits_sink = buffer_auto_in_d_bits_sink; // @[Buffer.scala:40:9] wire [63:0] buffer_nodeIn_d_bits_data; // @[MixedNode.scala:551:17] wire bus_xingOut_d_bits_denied = buffer_auto_in_d_bits_denied; // @[Buffer.scala:40:9] wire buffer_nodeIn_d_bits_corrupt; // @[MixedNode.scala:551:17] wire [63:0] bus_xingOut_d_bits_data = buffer_auto_in_d_bits_data; // @[Buffer.scala:40:9] wire bus_xingOut_d_bits_corrupt = buffer_auto_in_d_bits_corrupt; // @[Buffer.scala:40:9] wire buffer_nodeOut_a_ready = buffer_auto_out_a_ready; // @[Buffer.scala:40:9] wire buffer_nodeOut_a_valid; // @[MixedNode.scala:542:17] assign fixer_auto_anon_in_a_valid = buffer_auto_out_a_valid; // @[FIFOFixer.scala:50:9] wire 
[2:0] buffer_nodeOut_a_bits_opcode; // @[MixedNode.scala:542:17] assign fixer_auto_anon_in_a_bits_opcode = buffer_auto_out_a_bits_opcode; // @[FIFOFixer.scala:50:9] wire [2:0] buffer_nodeOut_a_bits_param; // @[MixedNode.scala:542:17] assign fixer_auto_anon_in_a_bits_param = buffer_auto_out_a_bits_param; // @[FIFOFixer.scala:50:9] wire [2:0] buffer_nodeOut_a_bits_size; // @[MixedNode.scala:542:17] assign fixer_auto_anon_in_a_bits_size = buffer_auto_out_a_bits_size; // @[FIFOFixer.scala:50:9] wire [5:0] buffer_nodeOut_a_bits_source; // @[MixedNode.scala:542:17] assign fixer_auto_anon_in_a_bits_source = buffer_auto_out_a_bits_source; // @[FIFOFixer.scala:50:9] wire [31:0] buffer_nodeOut_a_bits_address; // @[MixedNode.scala:542:17] assign fixer_auto_anon_in_a_bits_address = buffer_auto_out_a_bits_address; // @[FIFOFixer.scala:50:9] wire [7:0] buffer_nodeOut_a_bits_mask; // @[MixedNode.scala:542:17] assign fixer_auto_anon_in_a_bits_mask = buffer_auto_out_a_bits_mask; // @[FIFOFixer.scala:50:9] wire [63:0] buffer_nodeOut_a_bits_data; // @[MixedNode.scala:542:17] assign fixer_auto_anon_in_a_bits_data = buffer_auto_out_a_bits_data; // @[FIFOFixer.scala:50:9] wire buffer_nodeOut_a_bits_corrupt; // @[MixedNode.scala:542:17] assign fixer_auto_anon_in_a_bits_corrupt = buffer_auto_out_a_bits_corrupt; // @[FIFOFixer.scala:50:9] wire buffer_nodeOut_d_ready; // @[MixedNode.scala:542:17] assign fixer_auto_anon_in_d_ready = buffer_auto_out_d_ready; // @[FIFOFixer.scala:50:9] wire buffer_nodeOut_d_valid = buffer_auto_out_d_valid; // @[Buffer.scala:40:9] wire [2:0] buffer_nodeOut_d_bits_opcode = buffer_auto_out_d_bits_opcode; // @[Buffer.scala:40:9] wire [1:0] buffer_nodeOut_d_bits_param = buffer_auto_out_d_bits_param; // @[Buffer.scala:40:9] wire [2:0] buffer_nodeOut_d_bits_size = buffer_auto_out_d_bits_size; // @[Buffer.scala:40:9] wire [5:0] buffer_nodeOut_d_bits_source = buffer_auto_out_d_bits_source; // @[Buffer.scala:40:9] wire buffer_nodeOut_d_bits_sink = buffer_auto_out_d_bits_sink; // @[Buffer.scala:40:9] wire buffer_nodeOut_d_bits_denied = buffer_auto_out_d_bits_denied; // @[Buffer.scala:40:9] wire [63:0] buffer_nodeOut_d_bits_data = buffer_auto_out_d_bits_data; // @[Buffer.scala:40:9] wire buffer_nodeOut_d_bits_corrupt = buffer_auto_out_d_bits_corrupt; // @[Buffer.scala:40:9] assign buffer_nodeIn_a_ready = buffer_nodeOut_a_ready; // @[MixedNode.scala:542:17, :551:17] assign buffer_auto_out_a_valid = buffer_nodeOut_a_valid; // @[Buffer.scala:40:9] assign buffer_auto_out_a_bits_opcode = buffer_nodeOut_a_bits_opcode; // @[Buffer.scala:40:9] assign buffer_auto_out_a_bits_param = buffer_nodeOut_a_bits_param; // @[Buffer.scala:40:9] assign buffer_auto_out_a_bits_size = buffer_nodeOut_a_bits_size; // @[Buffer.scala:40:9] assign buffer_auto_out_a_bits_source = buffer_nodeOut_a_bits_source; // @[Buffer.scala:40:9] assign buffer_auto_out_a_bits_address = buffer_nodeOut_a_bits_address; // @[Buffer.scala:40:9] assign buffer_auto_out_a_bits_mask = buffer_nodeOut_a_bits_mask; // @[Buffer.scala:40:9] assign buffer_auto_out_a_bits_data = buffer_nodeOut_a_bits_data; // @[Buffer.scala:40:9] assign buffer_auto_out_a_bits_corrupt = buffer_nodeOut_a_bits_corrupt; // @[Buffer.scala:40:9] assign buffer_auto_out_d_ready = buffer_nodeOut_d_ready; // @[Buffer.scala:40:9] assign buffer_nodeIn_d_valid = buffer_nodeOut_d_valid; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_d_bits_opcode = buffer_nodeOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_d_bits_param = 
buffer_nodeOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_d_bits_size = buffer_nodeOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_d_bits_source = buffer_nodeOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_d_bits_sink = buffer_nodeOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_d_bits_denied = buffer_nodeOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_d_bits_data = buffer_nodeOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeIn_d_bits_corrupt = buffer_nodeOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] assign buffer_auto_in_a_ready = buffer_nodeIn_a_ready; // @[Buffer.scala:40:9] assign buffer_nodeOut_a_valid = buffer_nodeIn_a_valid; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_a_bits_opcode = buffer_nodeIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_a_bits_param = buffer_nodeIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_a_bits_size = buffer_nodeIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_a_bits_source = buffer_nodeIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_a_bits_address = buffer_nodeIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_a_bits_mask = buffer_nodeIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_a_bits_data = buffer_nodeIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_a_bits_corrupt = buffer_nodeIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] assign buffer_nodeOut_d_ready = buffer_nodeIn_d_ready; // @[MixedNode.scala:542:17, :551:17] assign buffer_auto_in_d_valid = buffer_nodeIn_d_valid; // @[Buffer.scala:40:9] assign buffer_auto_in_d_bits_opcode = buffer_nodeIn_d_bits_opcode; // @[Buffer.scala:40:9] assign buffer_auto_in_d_bits_param = buffer_nodeIn_d_bits_param; // @[Buffer.scala:40:9] assign buffer_auto_in_d_bits_size = buffer_nodeIn_d_bits_size; // @[Buffer.scala:40:9] assign buffer_auto_in_d_bits_source = buffer_nodeIn_d_bits_source; // @[Buffer.scala:40:9] assign buffer_auto_in_d_bits_sink = buffer_nodeIn_d_bits_sink; // @[Buffer.scala:40:9] assign buffer_auto_in_d_bits_denied = buffer_nodeIn_d_bits_denied; // @[Buffer.scala:40:9] assign buffer_auto_in_d_bits_data = buffer_nodeIn_d_bits_data; // @[Buffer.scala:40:9] assign buffer_auto_in_d_bits_corrupt = buffer_nodeIn_d_bits_corrupt; // @[Buffer.scala:40:9] wire xbar_anonIn_a_ready; // @[MixedNode.scala:551:17] wire xbar_anonIn_a_valid = xbar_auto_anon_in_a_valid; // @[Xbar.scala:74:9] wire [2:0] xbar_anonIn_a_bits_opcode = xbar_auto_anon_in_a_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] xbar_anonIn_a_bits_param = xbar_auto_anon_in_a_bits_param; // @[Xbar.scala:74:9] wire [2:0] xbar_anonIn_a_bits_size = xbar_auto_anon_in_a_bits_size; // @[Xbar.scala:74:9] wire [5:0] xbar_anonIn_a_bits_source = xbar_auto_anon_in_a_bits_source; // @[Xbar.scala:74:9] wire [31:0] xbar_anonIn_a_bits_address = xbar_auto_anon_in_a_bits_address; // @[Xbar.scala:74:9] wire [7:0] xbar_anonIn_a_bits_mask = xbar_auto_anon_in_a_bits_mask; // @[Xbar.scala:74:9] wire [63:0] xbar_anonIn_a_bits_data = xbar_auto_anon_in_a_bits_data; // @[Xbar.scala:74:9] wire xbar_anonIn_a_bits_corrupt = xbar_auto_anon_in_a_bits_corrupt; // @[Xbar.scala:74:9] wire xbar_anonIn_d_ready = xbar_auto_anon_in_d_ready; // @[Xbar.scala:74:9] wire xbar_anonIn_d_valid; // 
@[MixedNode.scala:551:17] wire [2:0] xbar_anonIn_d_bits_opcode; // @[MixedNode.scala:551:17] wire [2:0] xbar_anonIn_d_bits_size; // @[MixedNode.scala:551:17] wire [5:0] xbar_anonIn_d_bits_source; // @[MixedNode.scala:551:17] wire xbar_anonIn_d_bits_denied; // @[MixedNode.scala:551:17] wire [63:0] xbar_anonIn_d_bits_data; // @[MixedNode.scala:551:17] wire xbar_anonIn_d_bits_corrupt; // @[MixedNode.scala:551:17] wire xbar_anonOut_a_ready = xbar_auto_anon_out_a_ready; // @[Xbar.scala:74:9] wire xbar_anonOut_a_valid; // @[MixedNode.scala:542:17] wire [2:0] xbar_anonOut_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] xbar_anonOut_a_bits_param; // @[MixedNode.scala:542:17] wire [2:0] xbar_anonOut_a_bits_size; // @[MixedNode.scala:542:17] wire [5:0] xbar_anonOut_a_bits_source; // @[MixedNode.scala:542:17] wire [31:0] xbar_anonOut_a_bits_address; // @[MixedNode.scala:542:17] wire [7:0] xbar_anonOut_a_bits_mask; // @[MixedNode.scala:542:17] wire [63:0] xbar_anonOut_a_bits_data; // @[MixedNode.scala:542:17] wire xbar_anonOut_a_bits_corrupt; // @[MixedNode.scala:542:17] wire xbar_anonOut_d_ready; // @[MixedNode.scala:542:17] wire xbar_anonOut_d_valid = xbar_auto_anon_out_d_valid; // @[Xbar.scala:74:9] wire [2:0] xbar_anonOut_d_bits_opcode = xbar_auto_anon_out_d_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] xbar_anonOut_d_bits_size = xbar_auto_anon_out_d_bits_size; // @[Xbar.scala:74:9] wire [5:0] xbar_anonOut_d_bits_source = xbar_auto_anon_out_d_bits_source; // @[Xbar.scala:74:9] wire xbar_anonOut_d_bits_denied = xbar_auto_anon_out_d_bits_denied; // @[Xbar.scala:74:9] wire [63:0] xbar_anonOut_d_bits_data = xbar_auto_anon_out_d_bits_data; // @[Xbar.scala:74:9] wire xbar_anonOut_d_bits_corrupt = xbar_auto_anon_out_d_bits_corrupt; // @[Xbar.scala:74:9] wire xbar_auto_anon_in_a_ready; // @[Xbar.scala:74:9] wire [2:0] xbar_auto_anon_in_d_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] xbar_auto_anon_in_d_bits_size; // @[Xbar.scala:74:9] wire [5:0] xbar_auto_anon_in_d_bits_source; // @[Xbar.scala:74:9] wire xbar_auto_anon_in_d_bits_denied; // @[Xbar.scala:74:9] wire [63:0] xbar_auto_anon_in_d_bits_data; // @[Xbar.scala:74:9] wire xbar_auto_anon_in_d_bits_corrupt; // @[Xbar.scala:74:9] wire xbar_auto_anon_in_d_valid; // @[Xbar.scala:74:9] wire [2:0] xbar_auto_anon_out_a_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] xbar_auto_anon_out_a_bits_param; // @[Xbar.scala:74:9] wire [2:0] xbar_auto_anon_out_a_bits_size; // @[Xbar.scala:74:9] wire [5:0] xbar_auto_anon_out_a_bits_source; // @[Xbar.scala:74:9] wire [31:0] xbar_auto_anon_out_a_bits_address; // @[Xbar.scala:74:9] wire [7:0] xbar_auto_anon_out_a_bits_mask; // @[Xbar.scala:74:9] wire [63:0] xbar_auto_anon_out_a_bits_data; // @[Xbar.scala:74:9] wire xbar_auto_anon_out_a_bits_corrupt; // @[Xbar.scala:74:9] wire xbar_auto_anon_out_a_valid; // @[Xbar.scala:74:9] wire xbar_auto_anon_out_d_ready; // @[Xbar.scala:74:9] wire xbar_out_0_a_ready = xbar_anonOut_a_ready; // @[Xbar.scala:216:19] wire xbar_out_0_a_valid; // @[Xbar.scala:216:19] assign xbar_auto_anon_out_a_valid = xbar_anonOut_a_valid; // @[Xbar.scala:74:9] wire [2:0] xbar_out_0_a_bits_opcode; // @[Xbar.scala:216:19] assign xbar_auto_anon_out_a_bits_opcode = xbar_anonOut_a_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] xbar_out_0_a_bits_param; // @[Xbar.scala:216:19] assign xbar_auto_anon_out_a_bits_param = xbar_anonOut_a_bits_param; // @[Xbar.scala:74:9] wire [2:0] xbar_out_0_a_bits_size; // @[Xbar.scala:216:19] assign xbar_auto_anon_out_a_bits_size = xbar_anonOut_a_bits_size; // @[Xbar.scala:74:9] 
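// Note: this TLXbar instance degenerates to a single input and single output port; the A channel of in_0 is passed to out_0 through portsAOI_filtered_0 and the D channel of out_0 returns to in_0 through portsDIO_filtered_0.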
wire [5:0] xbar_out_0_a_bits_source; // @[Xbar.scala:216:19] assign xbar_auto_anon_out_a_bits_source = xbar_anonOut_a_bits_source; // @[Xbar.scala:74:9] wire [31:0] xbar_out_0_a_bits_address; // @[Xbar.scala:216:19] assign xbar_auto_anon_out_a_bits_address = xbar_anonOut_a_bits_address; // @[Xbar.scala:74:9] wire [7:0] xbar_out_0_a_bits_mask; // @[Xbar.scala:216:19] assign xbar_auto_anon_out_a_bits_mask = xbar_anonOut_a_bits_mask; // @[Xbar.scala:74:9] wire [63:0] xbar_out_0_a_bits_data; // @[Xbar.scala:216:19] assign xbar_auto_anon_out_a_bits_data = xbar_anonOut_a_bits_data; // @[Xbar.scala:74:9] wire xbar_out_0_a_bits_corrupt; // @[Xbar.scala:216:19] assign xbar_auto_anon_out_a_bits_corrupt = xbar_anonOut_a_bits_corrupt; // @[Xbar.scala:74:9] wire xbar_out_0_d_ready; // @[Xbar.scala:216:19] assign xbar_auto_anon_out_d_ready = xbar_anonOut_d_ready; // @[Xbar.scala:74:9] wire xbar_out_0_d_valid = xbar_anonOut_d_valid; // @[Xbar.scala:216:19] wire [2:0] xbar_out_0_d_bits_opcode = xbar_anonOut_d_bits_opcode; // @[Xbar.scala:216:19] wire [2:0] xbar_out_0_d_bits_size = xbar_anonOut_d_bits_size; // @[Xbar.scala:216:19] wire [5:0] xbar_out_0_d_bits_source = xbar_anonOut_d_bits_source; // @[Xbar.scala:216:19] wire xbar_out_0_d_bits_denied = xbar_anonOut_d_bits_denied; // @[Xbar.scala:216:19] wire [63:0] xbar_out_0_d_bits_data = xbar_anonOut_d_bits_data; // @[Xbar.scala:216:19] wire xbar_out_0_d_bits_corrupt = xbar_anonOut_d_bits_corrupt; // @[Xbar.scala:216:19] wire xbar_in_0_a_ready; // @[Xbar.scala:159:18] assign xbar_auto_anon_in_a_ready = xbar_anonIn_a_ready; // @[Xbar.scala:74:9] wire xbar_in_0_a_valid = xbar_anonIn_a_valid; // @[Xbar.scala:159:18] wire [2:0] xbar_in_0_a_bits_opcode = xbar_anonIn_a_bits_opcode; // @[Xbar.scala:159:18] wire [2:0] xbar_in_0_a_bits_param = xbar_anonIn_a_bits_param; // @[Xbar.scala:159:18] wire [2:0] xbar_in_0_a_bits_size = xbar_anonIn_a_bits_size; // @[Xbar.scala:159:18] wire [5:0] xbar__in_0_a_bits_source_T = xbar_anonIn_a_bits_source; // @[Xbar.scala:166:55] wire [31:0] xbar_in_0_a_bits_address = xbar_anonIn_a_bits_address; // @[Xbar.scala:159:18] wire [7:0] xbar_in_0_a_bits_mask = xbar_anonIn_a_bits_mask; // @[Xbar.scala:159:18] wire [63:0] xbar_in_0_a_bits_data = xbar_anonIn_a_bits_data; // @[Xbar.scala:159:18] wire xbar_in_0_a_bits_corrupt = xbar_anonIn_a_bits_corrupt; // @[Xbar.scala:159:18] wire xbar_in_0_d_ready = xbar_anonIn_d_ready; // @[Xbar.scala:159:18] wire xbar_in_0_d_valid; // @[Xbar.scala:159:18] assign xbar_auto_anon_in_d_valid = xbar_anonIn_d_valid; // @[Xbar.scala:74:9] wire [2:0] xbar_in_0_d_bits_opcode; // @[Xbar.scala:159:18] assign xbar_auto_anon_in_d_bits_opcode = xbar_anonIn_d_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] xbar_in_0_d_bits_size; // @[Xbar.scala:159:18] assign xbar_auto_anon_in_d_bits_size = xbar_anonIn_d_bits_size; // @[Xbar.scala:74:9] wire [5:0] xbar__anonIn_d_bits_source_T; // @[Xbar.scala:156:69] assign xbar_auto_anon_in_d_bits_source = xbar_anonIn_d_bits_source; // @[Xbar.scala:74:9] wire xbar_in_0_d_bits_denied; // @[Xbar.scala:159:18] assign xbar_auto_anon_in_d_bits_denied = xbar_anonIn_d_bits_denied; // @[Xbar.scala:74:9] wire [63:0] xbar_in_0_d_bits_data; // @[Xbar.scala:159:18] assign xbar_auto_anon_in_d_bits_data = xbar_anonIn_d_bits_data; // @[Xbar.scala:74:9] wire xbar_in_0_d_bits_corrupt; // @[Xbar.scala:159:18] assign xbar_auto_anon_in_d_bits_corrupt = xbar_anonIn_d_bits_corrupt; // @[Xbar.scala:74:9] wire xbar_portsAOI_filtered_0_ready; // @[Xbar.scala:352:24] assign xbar_anonIn_a_ready = 
xbar_in_0_a_ready; // @[Xbar.scala:159:18] wire xbar__portsAOI_filtered_0_valid_T_1 = xbar_in_0_a_valid; // @[Xbar.scala:159:18, :355:40] wire [2:0] xbar_portsAOI_filtered_0_bits_opcode = xbar_in_0_a_bits_opcode; // @[Xbar.scala:159:18, :352:24] wire [2:0] xbar_portsAOI_filtered_0_bits_param = xbar_in_0_a_bits_param; // @[Xbar.scala:159:18, :352:24] wire [2:0] xbar_portsAOI_filtered_0_bits_size = xbar_in_0_a_bits_size; // @[Xbar.scala:159:18, :352:24] wire [5:0] xbar_portsAOI_filtered_0_bits_source = xbar_in_0_a_bits_source; // @[Xbar.scala:159:18, :352:24] wire [31:0] xbar__requestAIO_T = xbar_in_0_a_bits_address; // @[Xbar.scala:159:18] wire [31:0] xbar_portsAOI_filtered_0_bits_address = xbar_in_0_a_bits_address; // @[Xbar.scala:159:18, :352:24] wire [7:0] xbar_portsAOI_filtered_0_bits_mask = xbar_in_0_a_bits_mask; // @[Xbar.scala:159:18, :352:24] wire [63:0] xbar_portsAOI_filtered_0_bits_data = xbar_in_0_a_bits_data; // @[Xbar.scala:159:18, :352:24] wire xbar_portsAOI_filtered_0_bits_corrupt = xbar_in_0_a_bits_corrupt; // @[Xbar.scala:159:18, :352:24] wire xbar_portsDIO_filtered_0_ready = xbar_in_0_d_ready; // @[Xbar.scala:159:18, :352:24] wire xbar_portsDIO_filtered_0_valid; // @[Xbar.scala:352:24] assign xbar_anonIn_d_valid = xbar_in_0_d_valid; // @[Xbar.scala:159:18] wire [2:0] xbar_portsDIO_filtered_0_bits_opcode; // @[Xbar.scala:352:24] assign xbar_anonIn_d_bits_opcode = xbar_in_0_d_bits_opcode; // @[Xbar.scala:159:18] wire [2:0] xbar_portsDIO_filtered_0_bits_size; // @[Xbar.scala:352:24] assign xbar_anonIn_d_bits_size = xbar_in_0_d_bits_size; // @[Xbar.scala:159:18] wire [5:0] xbar_portsDIO_filtered_0_bits_source; // @[Xbar.scala:352:24] assign xbar__anonIn_d_bits_source_T = xbar_in_0_d_bits_source; // @[Xbar.scala:156:69, :159:18] wire xbar_portsDIO_filtered_0_bits_denied; // @[Xbar.scala:352:24] assign xbar_anonIn_d_bits_denied = xbar_in_0_d_bits_denied; // @[Xbar.scala:159:18] wire [63:0] xbar_portsDIO_filtered_0_bits_data; // @[Xbar.scala:352:24] assign xbar_anonIn_d_bits_data = xbar_in_0_d_bits_data; // @[Xbar.scala:159:18] wire xbar_portsDIO_filtered_0_bits_corrupt; // @[Xbar.scala:352:24] assign xbar_anonIn_d_bits_corrupt = xbar_in_0_d_bits_corrupt; // @[Xbar.scala:159:18] assign xbar_in_0_a_bits_source = xbar__in_0_a_bits_source_T; // @[Xbar.scala:159:18, :166:55] assign xbar_anonIn_d_bits_source = xbar__anonIn_d_bits_source_T; // @[Xbar.scala:156:69] assign xbar_portsAOI_filtered_0_ready = xbar_out_0_a_ready; // @[Xbar.scala:216:19, :352:24] wire xbar_portsAOI_filtered_0_valid; // @[Xbar.scala:352:24] assign xbar_anonOut_a_valid = xbar_out_0_a_valid; // @[Xbar.scala:216:19] assign xbar_anonOut_a_bits_opcode = xbar_out_0_a_bits_opcode; // @[Xbar.scala:216:19] assign xbar_anonOut_a_bits_param = xbar_out_0_a_bits_param; // @[Xbar.scala:216:19] assign xbar_anonOut_a_bits_size = xbar_out_0_a_bits_size; // @[Xbar.scala:216:19] assign xbar_anonOut_a_bits_source = xbar_out_0_a_bits_source; // @[Xbar.scala:216:19] assign xbar_anonOut_a_bits_address = xbar_out_0_a_bits_address; // @[Xbar.scala:216:19] assign xbar_anonOut_a_bits_mask = xbar_out_0_a_bits_mask; // @[Xbar.scala:216:19] assign xbar_anonOut_a_bits_data = xbar_out_0_a_bits_data; // @[Xbar.scala:216:19] assign xbar_anonOut_a_bits_corrupt = xbar_out_0_a_bits_corrupt; // @[Xbar.scala:216:19] assign xbar_anonOut_d_ready = xbar_out_0_d_ready; // @[Xbar.scala:216:19] wire xbar__portsDIO_filtered_0_valid_T_1 = xbar_out_0_d_valid; // @[Xbar.scala:216:19, :355:40] assign xbar_portsDIO_filtered_0_bits_opcode = 
xbar_out_0_d_bits_opcode; // @[Xbar.scala:216:19, :352:24] assign xbar_portsDIO_filtered_0_bits_size = xbar_out_0_d_bits_size; // @[Xbar.scala:216:19, :352:24] wire [5:0] xbar__requestDOI_uncommonBits_T = xbar_out_0_d_bits_source; // @[Xbar.scala:216:19] assign xbar_portsDIO_filtered_0_bits_source = xbar_out_0_d_bits_source; // @[Xbar.scala:216:19, :352:24] assign xbar_portsDIO_filtered_0_bits_denied = xbar_out_0_d_bits_denied; // @[Xbar.scala:216:19, :352:24] assign xbar_portsDIO_filtered_0_bits_data = xbar_out_0_d_bits_data; // @[Xbar.scala:216:19, :352:24] assign xbar_portsDIO_filtered_0_bits_corrupt = xbar_out_0_d_bits_corrupt; // @[Xbar.scala:216:19, :352:24] wire [32:0] xbar__requestAIO_T_1 = {1'h0, xbar__requestAIO_T}; // @[Parameters.scala:137:{31,41}] wire [5:0] xbar_requestDOI_uncommonBits = xbar__requestDOI_uncommonBits_T; // @[Parameters.scala:52:{29,56}] wire [12:0] xbar__beatsAI_decode_T = 13'h3F << xbar_in_0_a_bits_size; // @[package.scala:243:71] wire [5:0] xbar__beatsAI_decode_T_1 = xbar__beatsAI_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] xbar__beatsAI_decode_T_2 = ~xbar__beatsAI_decode_T_1; // @[package.scala:243:{46,76}] wire [2:0] xbar_beatsAI_decode = xbar__beatsAI_decode_T_2[5:3]; // @[package.scala:243:46] wire xbar__beatsAI_opdata_T = xbar_in_0_a_bits_opcode[2]; // @[Xbar.scala:159:18] wire xbar_beatsAI_opdata = ~xbar__beatsAI_opdata_T; // @[Edges.scala:92:{28,37}] wire [2:0] xbar_beatsAI_0 = xbar_beatsAI_opdata ? xbar_beatsAI_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14] wire [12:0] xbar__beatsDO_decode_T = 13'h3F << xbar_out_0_d_bits_size; // @[package.scala:243:71] wire [5:0] xbar__beatsDO_decode_T_1 = xbar__beatsDO_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] xbar__beatsDO_decode_T_2 = ~xbar__beatsDO_decode_T_1; // @[package.scala:243:{46,76}] wire [2:0] xbar_beatsDO_decode = xbar__beatsDO_decode_T_2[5:3]; // @[package.scala:243:46] wire xbar_beatsDO_opdata = xbar_out_0_d_bits_opcode[0]; // @[Xbar.scala:216:19] wire [2:0] xbar_beatsDO_0 = xbar_beatsDO_opdata ? 
xbar_beatsDO_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] assign xbar_in_0_a_ready = xbar_portsAOI_filtered_0_ready; // @[Xbar.scala:159:18, :352:24] assign xbar_out_0_a_valid = xbar_portsAOI_filtered_0_valid; // @[Xbar.scala:216:19, :352:24] assign xbar_out_0_a_bits_opcode = xbar_portsAOI_filtered_0_bits_opcode; // @[Xbar.scala:216:19, :352:24] assign xbar_out_0_a_bits_param = xbar_portsAOI_filtered_0_bits_param; // @[Xbar.scala:216:19, :352:24] assign xbar_out_0_a_bits_size = xbar_portsAOI_filtered_0_bits_size; // @[Xbar.scala:216:19, :352:24] assign xbar_out_0_a_bits_source = xbar_portsAOI_filtered_0_bits_source; // @[Xbar.scala:216:19, :352:24] assign xbar_out_0_a_bits_address = xbar_portsAOI_filtered_0_bits_address; // @[Xbar.scala:216:19, :352:24] assign xbar_out_0_a_bits_mask = xbar_portsAOI_filtered_0_bits_mask; // @[Xbar.scala:216:19, :352:24] assign xbar_out_0_a_bits_data = xbar_portsAOI_filtered_0_bits_data; // @[Xbar.scala:216:19, :352:24] assign xbar_out_0_a_bits_corrupt = xbar_portsAOI_filtered_0_bits_corrupt; // @[Xbar.scala:216:19, :352:24] assign xbar_portsAOI_filtered_0_valid = xbar__portsAOI_filtered_0_valid_T_1; // @[Xbar.scala:352:24, :355:40] assign xbar_out_0_d_ready = xbar_portsDIO_filtered_0_ready; // @[Xbar.scala:216:19, :352:24] assign xbar_in_0_d_valid = xbar_portsDIO_filtered_0_valid; // @[Xbar.scala:159:18, :352:24] assign xbar_in_0_d_bits_opcode = xbar_portsDIO_filtered_0_bits_opcode; // @[Xbar.scala:159:18, :352:24] assign xbar_in_0_d_bits_size = xbar_portsDIO_filtered_0_bits_size; // @[Xbar.scala:159:18, :352:24] assign xbar_in_0_d_bits_source = xbar_portsDIO_filtered_0_bits_source; // @[Xbar.scala:159:18, :352:24] assign xbar_in_0_d_bits_denied = xbar_portsDIO_filtered_0_bits_denied; // @[Xbar.scala:159:18, :352:24] assign xbar_in_0_d_bits_data = xbar_portsDIO_filtered_0_bits_data; // @[Xbar.scala:159:18, :352:24] assign xbar_in_0_d_bits_corrupt = xbar_portsDIO_filtered_0_bits_corrupt; // @[Xbar.scala:159:18, :352:24] assign xbar_portsDIO_filtered_0_valid = xbar__portsDIO_filtered_0_valid_T_1; // @[Xbar.scala:352:24, :355:40] wire coupler_to_mbusscratchpad00_tlIn_a_ready; // @[MixedNode.scala:551:17] wire coupler_to_mbusscratchpad00_tlIn_a_valid = coupler_to_mbusscratchpad00_auto_tl_in_a_valid; // @[MixedNode.scala:551:17] wire [2:0] coupler_to_mbusscratchpad00_tlIn_a_bits_opcode = coupler_to_mbusscratchpad00_auto_tl_in_a_bits_opcode; // @[MixedNode.scala:551:17] wire [2:0] coupler_to_mbusscratchpad00_tlIn_a_bits_param = coupler_to_mbusscratchpad00_auto_tl_in_a_bits_param; // @[MixedNode.scala:551:17] wire [2:0] coupler_to_mbusscratchpad00_tlIn_a_bits_size = coupler_to_mbusscratchpad00_auto_tl_in_a_bits_size; // @[MixedNode.scala:551:17] wire [5:0] coupler_to_mbusscratchpad00_tlIn_a_bits_source = coupler_to_mbusscratchpad00_auto_tl_in_a_bits_source; // @[MixedNode.scala:551:17] wire [27:0] coupler_to_mbusscratchpad00_tlIn_a_bits_address = coupler_to_mbusscratchpad00_auto_tl_in_a_bits_address; // @[MixedNode.scala:551:17] wire [7:0] coupler_to_mbusscratchpad00_tlIn_a_bits_mask = coupler_to_mbusscratchpad00_auto_tl_in_a_bits_mask; // @[MixedNode.scala:551:17] wire [63:0] coupler_to_mbusscratchpad00_tlIn_a_bits_data = coupler_to_mbusscratchpad00_auto_tl_in_a_bits_data; // @[MixedNode.scala:551:17] wire coupler_to_mbusscratchpad00_tlIn_a_bits_corrupt = coupler_to_mbusscratchpad00_auto_tl_in_a_bits_corrupt; // @[MixedNode.scala:551:17] wire coupler_to_mbusscratchpad00_tlIn_d_ready = coupler_to_mbusscratchpad00_auto_tl_in_d_ready; // 
@[MixedNode.scala:551:17] wire coupler_to_mbusscratchpad00_tlIn_d_valid; // @[MixedNode.scala:551:17] wire [2:0] coupler_to_mbusscratchpad00_tlIn_d_bits_opcode; // @[MixedNode.scala:551:17] wire [1:0] coupler_to_mbusscratchpad00_tlIn_d_bits_param; // @[MixedNode.scala:551:17] wire [2:0] coupler_to_mbusscratchpad00_tlIn_d_bits_size; // @[MixedNode.scala:551:17] wire [5:0] coupler_to_mbusscratchpad00_tlIn_d_bits_source; // @[MixedNode.scala:551:17] wire coupler_to_mbusscratchpad00_tlIn_d_bits_sink; // @[MixedNode.scala:551:17] wire coupler_to_mbusscratchpad00_tlIn_d_bits_denied; // @[MixedNode.scala:551:17] wire [63:0] coupler_to_mbusscratchpad00_tlIn_d_bits_data; // @[MixedNode.scala:551:17] wire coupler_to_mbusscratchpad00_tlIn_d_bits_corrupt; // @[MixedNode.scala:551:17] wire coupler_to_mbusscratchpad00_tlOut_a_ready = coupler_to_mbusscratchpad00_auto_tl_out_a_ready; // @[MixedNode.scala:542:17] wire coupler_to_mbusscratchpad00_tlOut_a_valid; // @[MixedNode.scala:542:17] wire [2:0] coupler_to_mbusscratchpad00_tlOut_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] coupler_to_mbusscratchpad00_tlOut_a_bits_param; // @[MixedNode.scala:542:17] wire [2:0] coupler_to_mbusscratchpad00_tlOut_a_bits_size; // @[MixedNode.scala:542:17] wire [5:0] coupler_to_mbusscratchpad00_tlOut_a_bits_source; // @[MixedNode.scala:542:17] wire [27:0] coupler_to_mbusscratchpad00_tlOut_a_bits_address; // @[MixedNode.scala:542:17] wire [7:0] coupler_to_mbusscratchpad00_tlOut_a_bits_mask; // @[MixedNode.scala:542:17] wire [63:0] coupler_to_mbusscratchpad00_tlOut_a_bits_data; // @[MixedNode.scala:542:17] wire coupler_to_mbusscratchpad00_tlOut_a_bits_corrupt; // @[MixedNode.scala:542:17] wire coupler_to_mbusscratchpad00_tlOut_d_ready; // @[MixedNode.scala:542:17] wire coupler_to_mbusscratchpad00_tlOut_d_valid = coupler_to_mbusscratchpad00_auto_tl_out_d_valid; // @[MixedNode.scala:542:17] wire [2:0] coupler_to_mbusscratchpad00_tlOut_d_bits_opcode = coupler_to_mbusscratchpad00_auto_tl_out_d_bits_opcode; // @[MixedNode.scala:542:17] wire [1:0] coupler_to_mbusscratchpad00_tlOut_d_bits_param = coupler_to_mbusscratchpad00_auto_tl_out_d_bits_param; // @[MixedNode.scala:542:17] wire [2:0] coupler_to_mbusscratchpad00_tlOut_d_bits_size = coupler_to_mbusscratchpad00_auto_tl_out_d_bits_size; // @[MixedNode.scala:542:17] wire [5:0] coupler_to_mbusscratchpad00_tlOut_d_bits_source = coupler_to_mbusscratchpad00_auto_tl_out_d_bits_source; // @[MixedNode.scala:542:17] wire coupler_to_mbusscratchpad00_tlOut_d_bits_sink = coupler_to_mbusscratchpad00_auto_tl_out_d_bits_sink; // @[MixedNode.scala:542:17] wire coupler_to_mbusscratchpad00_tlOut_d_bits_denied = coupler_to_mbusscratchpad00_auto_tl_out_d_bits_denied; // @[MixedNode.scala:542:17] wire [63:0] coupler_to_mbusscratchpad00_tlOut_d_bits_data = coupler_to_mbusscratchpad00_auto_tl_out_d_bits_data; // @[MixedNode.scala:542:17] wire coupler_to_mbusscratchpad00_tlOut_d_bits_corrupt = coupler_to_mbusscratchpad00_auto_tl_out_d_bits_corrupt; // @[MixedNode.scala:542:17] wire coupler_to_mbusscratchpad00_auto_tl_in_a_ready; // @[LazyModuleImp.scala:138:7] wire [2:0] coupler_to_mbusscratchpad00_auto_tl_in_d_bits_opcode; // @[LazyModuleImp.scala:138:7] wire [1:0] coupler_to_mbusscratchpad00_auto_tl_in_d_bits_param; // @[LazyModuleImp.scala:138:7] wire [2:0] coupler_to_mbusscratchpad00_auto_tl_in_d_bits_size; // @[LazyModuleImp.scala:138:7] wire [5:0] coupler_to_mbusscratchpad00_auto_tl_in_d_bits_source; // @[LazyModuleImp.scala:138:7] wire coupler_to_mbusscratchpad00_auto_tl_in_d_bits_sink; // 
@[LazyModuleImp.scala:138:7] wire coupler_to_mbusscratchpad00_auto_tl_in_d_bits_denied; // @[LazyModuleImp.scala:138:7] wire [63:0] coupler_to_mbusscratchpad00_auto_tl_in_d_bits_data; // @[LazyModuleImp.scala:138:7] wire coupler_to_mbusscratchpad00_auto_tl_in_d_bits_corrupt; // @[LazyModuleImp.scala:138:7] wire coupler_to_mbusscratchpad00_auto_tl_in_d_valid; // @[LazyModuleImp.scala:138:7] wire [2:0] coupler_to_mbusscratchpad00_auto_tl_out_a_bits_opcode; // @[LazyModuleImp.scala:138:7] wire [2:0] coupler_to_mbusscratchpad00_auto_tl_out_a_bits_param; // @[LazyModuleImp.scala:138:7] wire [2:0] coupler_to_mbusscratchpad00_auto_tl_out_a_bits_size; // @[LazyModuleImp.scala:138:7] wire [5:0] coupler_to_mbusscratchpad00_auto_tl_out_a_bits_source; // @[LazyModuleImp.scala:138:7] wire [27:0] coupler_to_mbusscratchpad00_auto_tl_out_a_bits_address; // @[LazyModuleImp.scala:138:7] wire [7:0] coupler_to_mbusscratchpad00_auto_tl_out_a_bits_mask; // @[LazyModuleImp.scala:138:7] wire [63:0] coupler_to_mbusscratchpad00_auto_tl_out_a_bits_data; // @[LazyModuleImp.scala:138:7] wire coupler_to_mbusscratchpad00_auto_tl_out_a_bits_corrupt; // @[LazyModuleImp.scala:138:7] wire coupler_to_mbusscratchpad00_auto_tl_out_a_valid; // @[LazyModuleImp.scala:138:7] wire coupler_to_mbusscratchpad00_auto_tl_out_d_ready; // @[LazyModuleImp.scala:138:7] assign coupler_to_mbusscratchpad00_tlIn_a_ready = coupler_to_mbusscratchpad00_tlOut_a_ready; // @[MixedNode.scala:542:17, :551:17] assign coupler_to_mbusscratchpad00_auto_tl_out_a_valid = coupler_to_mbusscratchpad00_tlOut_a_valid; // @[MixedNode.scala:542:17] assign coupler_to_mbusscratchpad00_auto_tl_out_a_bits_opcode = coupler_to_mbusscratchpad00_tlOut_a_bits_opcode; // @[MixedNode.scala:542:17] assign coupler_to_mbusscratchpad00_auto_tl_out_a_bits_param = coupler_to_mbusscratchpad00_tlOut_a_bits_param; // @[MixedNode.scala:542:17] assign coupler_to_mbusscratchpad00_auto_tl_out_a_bits_size = coupler_to_mbusscratchpad00_tlOut_a_bits_size; // @[MixedNode.scala:542:17] assign coupler_to_mbusscratchpad00_auto_tl_out_a_bits_source = coupler_to_mbusscratchpad00_tlOut_a_bits_source; // @[MixedNode.scala:542:17] assign coupler_to_mbusscratchpad00_auto_tl_out_a_bits_address = coupler_to_mbusscratchpad00_tlOut_a_bits_address; // @[MixedNode.scala:542:17] assign coupler_to_mbusscratchpad00_auto_tl_out_a_bits_mask = coupler_to_mbusscratchpad00_tlOut_a_bits_mask; // @[MixedNode.scala:542:17] assign coupler_to_mbusscratchpad00_auto_tl_out_a_bits_data = coupler_to_mbusscratchpad00_tlOut_a_bits_data; // @[MixedNode.scala:542:17] assign coupler_to_mbusscratchpad00_auto_tl_out_a_bits_corrupt = coupler_to_mbusscratchpad00_tlOut_a_bits_corrupt; // @[MixedNode.scala:542:17] assign coupler_to_mbusscratchpad00_auto_tl_out_d_ready = coupler_to_mbusscratchpad00_tlOut_d_ready; // @[MixedNode.scala:542:17] assign coupler_to_mbusscratchpad00_tlIn_d_valid = coupler_to_mbusscratchpad00_tlOut_d_valid; // @[MixedNode.scala:542:17, :551:17] assign coupler_to_mbusscratchpad00_tlIn_d_bits_opcode = coupler_to_mbusscratchpad00_tlOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign coupler_to_mbusscratchpad00_tlIn_d_bits_param = coupler_to_mbusscratchpad00_tlOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17] assign coupler_to_mbusscratchpad00_tlIn_d_bits_size = coupler_to_mbusscratchpad00_tlOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17] assign coupler_to_mbusscratchpad00_tlIn_d_bits_source = coupler_to_mbusscratchpad00_tlOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17] assign 
coupler_to_mbusscratchpad00_tlIn_d_bits_sink = coupler_to_mbusscratchpad00_tlOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17] assign coupler_to_mbusscratchpad00_tlIn_d_bits_denied = coupler_to_mbusscratchpad00_tlOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17] assign coupler_to_mbusscratchpad00_tlIn_d_bits_data = coupler_to_mbusscratchpad00_tlOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17] assign coupler_to_mbusscratchpad00_tlIn_d_bits_corrupt = coupler_to_mbusscratchpad00_tlOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] assign coupler_to_mbusscratchpad00_auto_tl_in_a_ready = coupler_to_mbusscratchpad00_tlIn_a_ready; // @[MixedNode.scala:551:17] assign coupler_to_mbusscratchpad00_tlOut_a_valid = coupler_to_mbusscratchpad00_tlIn_a_valid; // @[MixedNode.scala:542:17, :551:17] assign coupler_to_mbusscratchpad00_tlOut_a_bits_opcode = coupler_to_mbusscratchpad00_tlIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign coupler_to_mbusscratchpad00_tlOut_a_bits_param = coupler_to_mbusscratchpad00_tlIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17] assign coupler_to_mbusscratchpad00_tlOut_a_bits_size = coupler_to_mbusscratchpad00_tlIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17] assign coupler_to_mbusscratchpad00_tlOut_a_bits_source = coupler_to_mbusscratchpad00_tlIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17] assign coupler_to_mbusscratchpad00_tlOut_a_bits_address = coupler_to_mbusscratchpad00_tlIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17] assign coupler_to_mbusscratchpad00_tlOut_a_bits_mask = coupler_to_mbusscratchpad00_tlIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17] assign coupler_to_mbusscratchpad00_tlOut_a_bits_data = coupler_to_mbusscratchpad00_tlIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17] assign coupler_to_mbusscratchpad00_tlOut_a_bits_corrupt = coupler_to_mbusscratchpad00_tlIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] assign coupler_to_mbusscratchpad00_tlOut_d_ready = coupler_to_mbusscratchpad00_tlIn_d_ready; // @[MixedNode.scala:542:17, :551:17] assign coupler_to_mbusscratchpad00_auto_tl_in_d_valid = coupler_to_mbusscratchpad00_tlIn_d_valid; // @[MixedNode.scala:551:17] assign coupler_to_mbusscratchpad00_auto_tl_in_d_bits_opcode = coupler_to_mbusscratchpad00_tlIn_d_bits_opcode; // @[MixedNode.scala:551:17] assign coupler_to_mbusscratchpad00_auto_tl_in_d_bits_param = coupler_to_mbusscratchpad00_tlIn_d_bits_param; // @[MixedNode.scala:551:17] assign coupler_to_mbusscratchpad00_auto_tl_in_d_bits_size = coupler_to_mbusscratchpad00_tlIn_d_bits_size; // @[MixedNode.scala:551:17] assign coupler_to_mbusscratchpad00_auto_tl_in_d_bits_source = coupler_to_mbusscratchpad00_tlIn_d_bits_source; // @[MixedNode.scala:551:17] assign coupler_to_mbusscratchpad00_auto_tl_in_d_bits_sink = coupler_to_mbusscratchpad00_tlIn_d_bits_sink; // @[MixedNode.scala:551:17] assign coupler_to_mbusscratchpad00_auto_tl_in_d_bits_denied = coupler_to_mbusscratchpad00_tlIn_d_bits_denied; // @[MixedNode.scala:551:17] assign coupler_to_mbusscratchpad00_auto_tl_in_d_bits_data = coupler_to_mbusscratchpad00_tlIn_d_bits_data; // @[MixedNode.scala:551:17] assign coupler_to_mbusscratchpad00_auto_tl_in_d_bits_corrupt = coupler_to_mbusscratchpad00_tlIn_d_bits_corrupt; // @[MixedNode.scala:551:17] assign childClock = clockSinkNodeIn_clock; // @[MixedNode.scala:551:17] assign childReset = clockSinkNodeIn_reset; // @[MixedNode.scala:551:17] assign bus_xingIn_a_ready = bus_xingOut_a_ready; // @[MixedNode.scala:542:17, :551:17] assign 
buffer_auto_in_a_valid = bus_xingOut_a_valid; // @[Buffer.scala:40:9] assign buffer_auto_in_a_bits_opcode = bus_xingOut_a_bits_opcode; // @[Buffer.scala:40:9] assign buffer_auto_in_a_bits_param = bus_xingOut_a_bits_param; // @[Buffer.scala:40:9] assign buffer_auto_in_a_bits_size = bus_xingOut_a_bits_size; // @[Buffer.scala:40:9] assign buffer_auto_in_a_bits_source = bus_xingOut_a_bits_source; // @[Buffer.scala:40:9] assign buffer_auto_in_a_bits_address = bus_xingOut_a_bits_address; // @[Buffer.scala:40:9] assign buffer_auto_in_a_bits_mask = bus_xingOut_a_bits_mask; // @[Buffer.scala:40:9] assign buffer_auto_in_a_bits_data = bus_xingOut_a_bits_data; // @[Buffer.scala:40:9] assign buffer_auto_in_a_bits_corrupt = bus_xingOut_a_bits_corrupt; // @[Buffer.scala:40:9] assign buffer_auto_in_d_ready = bus_xingOut_d_ready; // @[Buffer.scala:40:9] assign bus_xingIn_d_valid = bus_xingOut_d_valid; // @[MixedNode.scala:542:17, :551:17] assign bus_xingIn_d_bits_opcode = bus_xingOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign bus_xingIn_d_bits_param = bus_xingOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17] assign bus_xingIn_d_bits_size = bus_xingOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17] assign bus_xingIn_d_bits_source = bus_xingOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17] assign bus_xingIn_d_bits_sink = bus_xingOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17] assign bus_xingIn_d_bits_denied = bus_xingOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17] assign bus_xingIn_d_bits_data = bus_xingOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17] assign bus_xingIn_d_bits_corrupt = bus_xingOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] assign auto_bus_xing_in_a_ready_0 = bus_xingIn_a_ready; // @[ClockDomain.scala:14:9] assign bus_xingOut_a_valid = bus_xingIn_a_valid; // @[MixedNode.scala:542:17, :551:17] assign bus_xingOut_a_bits_opcode = bus_xingIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign bus_xingOut_a_bits_param = bus_xingIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17] assign bus_xingOut_a_bits_size = bus_xingIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17] assign bus_xingOut_a_bits_source = bus_xingIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17] assign bus_xingOut_a_bits_address = bus_xingIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17] assign bus_xingOut_a_bits_mask = bus_xingIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17] assign bus_xingOut_a_bits_data = bus_xingIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17] assign bus_xingOut_a_bits_corrupt = bus_xingIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] assign bus_xingOut_d_ready = bus_xingIn_d_ready; // @[MixedNode.scala:542:17, :551:17] assign auto_bus_xing_in_d_valid_0 = bus_xingIn_d_valid; // @[ClockDomain.scala:14:9] assign auto_bus_xing_in_d_bits_opcode_0 = bus_xingIn_d_bits_opcode; // @[ClockDomain.scala:14:9] assign auto_bus_xing_in_d_bits_param_0 = bus_xingIn_d_bits_param; // @[ClockDomain.scala:14:9] assign auto_bus_xing_in_d_bits_size_0 = bus_xingIn_d_bits_size; // @[ClockDomain.scala:14:9] assign auto_bus_xing_in_d_bits_source_0 = bus_xingIn_d_bits_source; // @[ClockDomain.scala:14:9] assign auto_bus_xing_in_d_bits_sink_0 = bus_xingIn_d_bits_sink; // @[ClockDomain.scala:14:9] assign auto_bus_xing_in_d_bits_denied_0 = bus_xingIn_d_bits_denied; // @[ClockDomain.scala:14:9] assign auto_bus_xing_in_d_bits_data_0 = bus_xingIn_d_bits_data; // @[ClockDomain.scala:14:9] assign auto_bus_xing_in_d_bits_corrupt_0 = 
bus_xingIn_d_bits_corrupt; // @[ClockDomain.scala:14:9] wire fixer__T_1 = fixer_a_first & fixer__a_first_T; // @[Decoupled.scala:51:35] wire fixer__T_3 = fixer_d_first & fixer__T_2; // @[Decoupled.scala:51:35] always @(posedge childClock) begin // @[LazyModuleImp.scala:155:31] if (childReset) begin // @[LazyModuleImp.scala:155:31, :158:31] fixer_a_first_counter <= 3'h0; // @[Edges.scala:229:27] fixer_d_first_counter <= 3'h0; // @[Edges.scala:229:27] fixer_flight_0 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_1 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_2 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_3 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_4 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_5 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_6 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_7 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_8 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_9 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_10 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_11 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_12 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_13 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_14 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_15 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_16 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_17 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_18 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_19 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_20 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_21 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_22 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_23 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_24 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_25 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_26 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_27 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_28 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_29 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_30 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_31 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_32 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_33 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_34 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_35 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_36 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_37 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_38 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_flight_39 <= 1'h0; // @[FIFOFixer.scala:79:27] fixer_SourceIdFIFOed <= 40'h0; // @[FIFOFixer.scala:115:35] end else begin // @[LazyModuleImp.scala:155:31] if (fixer__a_first_T) // @[Decoupled.scala:51:35] fixer_a_first_counter <= fixer__a_first_counter_T; // @[Edges.scala:229:27, :236:21] if (fixer__d_first_T) // @[Decoupled.scala:51:35] fixer_d_first_counter <= fixer__d_first_counter_T; // @[Edges.scala:229:27, :236:21] fixer_flight_0 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'h0) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'h0 | fixer_flight_0); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_1 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'h1) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'h1 | fixer_flight_1); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_2 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'h2) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'h2 | fixer_flight_2); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_3 
<= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'h3) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'h3 | fixer_flight_3); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_4 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'h4) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'h4 | fixer_flight_4); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_5 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'h5) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'h5 | fixer_flight_5); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_6 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'h6) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'h6 | fixer_flight_6); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_7 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'h7) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'h7 | fixer_flight_7); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_8 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'h8) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'h8 | fixer_flight_8); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_9 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'h9) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'h9 | fixer_flight_9); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_10 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'hA) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'hA | fixer_flight_10); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_11 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'hB) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'hB | fixer_flight_11); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_12 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'hC) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'hC | fixer_flight_12); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_13 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'hD) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'hD | fixer_flight_13); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_14 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'hE) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'hE | fixer_flight_14); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_15 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'hF) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'hF | fixer_flight_15); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_16 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'h10) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'h10 | fixer_flight_16); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_17 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'h11) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'h11 | fixer_flight_17); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_18 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'h12) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'h12 | fixer_flight_18); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_19 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'h13) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'h13 | fixer_flight_19); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_20 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'h14) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'h14 | fixer_flight_20); // 
@[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_21 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'h15) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'h15 | fixer_flight_21); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_22 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'h16) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'h16 | fixer_flight_22); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_23 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'h17) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'h17 | fixer_flight_23); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_24 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'h18) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'h18 | fixer_flight_24); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_25 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'h19) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'h19 | fixer_flight_25); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_26 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'h1A) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'h1A | fixer_flight_26); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_27 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'h1B) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'h1B | fixer_flight_27); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_28 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'h1C) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'h1C | fixer_flight_28); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_29 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'h1D) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'h1D | fixer_flight_29); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_30 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'h1E) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'h1E | fixer_flight_30); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_31 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'h1F) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'h1F | fixer_flight_31); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_32 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'h20) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'h20 | fixer_flight_32); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_33 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'h21) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'h21 | fixer_flight_33); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_34 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'h22) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'h22 | fixer_flight_34); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_35 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'h23) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'h23 | fixer_flight_35); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_36 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'h24) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'h24 | fixer_flight_36); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_37 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'h25) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'h25 | fixer_flight_37); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_38 <= ~(fixer__T_3 & 
fixer_anonIn_d_bits_source == 6'h26) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'h26 | fixer_flight_38); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_flight_39 <= ~(fixer__T_3 & fixer_anonIn_d_bits_source == 6'h27) & (fixer__T_1 & fixer_anonIn_a_bits_source == 6'h27 | fixer_flight_39); // @[FIFOFixer.scala:79:27, :80:{21,35,62}, :81:{21,35,62}] fixer_SourceIdFIFOed <= fixer__SourceIdFIFOed_T; // @[FIFOFixer.scala:115:35, :126:40] end always @(posedge) FixedClockBroadcast_3 fixedClockNode ( // @[ClockGroup.scala:115:114] .auto_anon_in_clock (clockGroup_auto_out_clock), // @[ClockGroup.scala:24:9] .auto_anon_in_reset (clockGroup_auto_out_reset), // @[ClockGroup.scala:24:9] .auto_anon_out_2_clock (auto_fixedClockNode_anon_out_1_clock_0), .auto_anon_out_2_reset (auto_fixedClockNode_anon_out_1_reset_0), .auto_anon_out_1_clock (auto_fixedClockNode_anon_out_0_clock_0), .auto_anon_out_1_reset (auto_fixedClockNode_anon_out_0_reset_0), .auto_anon_out_0_clock (clockSinkNodeIn_clock), .auto_anon_out_0_reset (clockSinkNodeIn_reset) ); // @[ClockGroup.scala:115:114] TLXbar_mbus_i1_o2_a32d64s6k1z3u mbus_xbar ( // @[MemoryBus.scala:47:32] .clock (childClock), // @[LazyModuleImp.scala:155:31] .reset (childReset), // @[LazyModuleImp.scala:158:31] .auto_anon_in_a_ready (fixer_auto_anon_out_a_ready), .auto_anon_in_a_valid (fixer_auto_anon_out_a_valid), // @[FIFOFixer.scala:50:9] .auto_anon_in_a_bits_opcode (fixer_auto_anon_out_a_bits_opcode), // @[FIFOFixer.scala:50:9] .auto_anon_in_a_bits_param (fixer_auto_anon_out_a_bits_param), // @[FIFOFixer.scala:50:9] .auto_anon_in_a_bits_size (fixer_auto_anon_out_a_bits_size), // @[FIFOFixer.scala:50:9] .auto_anon_in_a_bits_source (fixer_auto_anon_out_a_bits_source), // @[FIFOFixer.scala:50:9] .auto_anon_in_a_bits_address (fixer_auto_anon_out_a_bits_address), // @[FIFOFixer.scala:50:9] .auto_anon_in_a_bits_mask (fixer_auto_anon_out_a_bits_mask), // @[FIFOFixer.scala:50:9] .auto_anon_in_a_bits_data (fixer_auto_anon_out_a_bits_data), // @[FIFOFixer.scala:50:9] .auto_anon_in_a_bits_corrupt (fixer_auto_anon_out_a_bits_corrupt), // @[FIFOFixer.scala:50:9] .auto_anon_in_d_ready (fixer_auto_anon_out_d_ready), // @[FIFOFixer.scala:50:9] .auto_anon_in_d_valid (fixer_auto_anon_out_d_valid), .auto_anon_in_d_bits_opcode (fixer_auto_anon_out_d_bits_opcode), .auto_anon_in_d_bits_param (fixer_auto_anon_out_d_bits_param), .auto_anon_in_d_bits_size (fixer_auto_anon_out_d_bits_size), .auto_anon_in_d_bits_source (fixer_auto_anon_out_d_bits_source), .auto_anon_in_d_bits_sink (fixer_auto_anon_out_d_bits_sink), .auto_anon_in_d_bits_denied (fixer_auto_anon_out_d_bits_denied), .auto_anon_in_d_bits_data (fixer_auto_anon_out_d_bits_data), .auto_anon_in_d_bits_corrupt (fixer_auto_anon_out_d_bits_corrupt), .auto_anon_out_1_a_ready (_picker_auto_in_1_a_ready), // @[ProbePicker.scala:69:28] .auto_anon_out_1_a_valid (_mbus_xbar_auto_anon_out_1_a_valid), .auto_anon_out_1_a_bits_opcode (_mbus_xbar_auto_anon_out_1_a_bits_opcode), .auto_anon_out_1_a_bits_param (_mbus_xbar_auto_anon_out_1_a_bits_param), .auto_anon_out_1_a_bits_size (_mbus_xbar_auto_anon_out_1_a_bits_size), .auto_anon_out_1_a_bits_source (_mbus_xbar_auto_anon_out_1_a_bits_source), .auto_anon_out_1_a_bits_address (_mbus_xbar_auto_anon_out_1_a_bits_address), .auto_anon_out_1_a_bits_mask (_mbus_xbar_auto_anon_out_1_a_bits_mask), .auto_anon_out_1_a_bits_data (_mbus_xbar_auto_anon_out_1_a_bits_data), .auto_anon_out_1_a_bits_corrupt (_mbus_xbar_auto_anon_out_1_a_bits_corrupt), .auto_anon_out_1_d_ready 
(_mbus_xbar_auto_anon_out_1_d_ready), .auto_anon_out_1_d_valid (_picker_auto_in_1_d_valid), // @[ProbePicker.scala:69:28] .auto_anon_out_1_d_bits_opcode (_picker_auto_in_1_d_bits_opcode), // @[ProbePicker.scala:69:28] .auto_anon_out_1_d_bits_param (_picker_auto_in_1_d_bits_param), // @[ProbePicker.scala:69:28] .auto_anon_out_1_d_bits_size (_picker_auto_in_1_d_bits_size), // @[ProbePicker.scala:69:28] .auto_anon_out_1_d_bits_source (_picker_auto_in_1_d_bits_source), // @[ProbePicker.scala:69:28] .auto_anon_out_1_d_bits_sink (_picker_auto_in_1_d_bits_sink), // @[ProbePicker.scala:69:28] .auto_anon_out_1_d_bits_denied (_picker_auto_in_1_d_bits_denied), // @[ProbePicker.scala:69:28] .auto_anon_out_1_d_bits_data (_picker_auto_in_1_d_bits_data), // @[ProbePicker.scala:69:28] .auto_anon_out_1_d_bits_corrupt (_picker_auto_in_1_d_bits_corrupt), // @[ProbePicker.scala:69:28] .auto_anon_out_0_a_ready (_picker_auto_in_0_a_ready), // @[ProbePicker.scala:69:28] .auto_anon_out_0_a_valid (_mbus_xbar_auto_anon_out_0_a_valid), .auto_anon_out_0_a_bits_opcode (_mbus_xbar_auto_anon_out_0_a_bits_opcode), .auto_anon_out_0_a_bits_param (_mbus_xbar_auto_anon_out_0_a_bits_param), .auto_anon_out_0_a_bits_size (_mbus_xbar_auto_anon_out_0_a_bits_size), .auto_anon_out_0_a_bits_source (_mbus_xbar_auto_anon_out_0_a_bits_source), .auto_anon_out_0_a_bits_address (_mbus_xbar_auto_anon_out_0_a_bits_address), .auto_anon_out_0_a_bits_mask (_mbus_xbar_auto_anon_out_0_a_bits_mask), .auto_anon_out_0_a_bits_data (_mbus_xbar_auto_anon_out_0_a_bits_data), .auto_anon_out_0_a_bits_corrupt (_mbus_xbar_auto_anon_out_0_a_bits_corrupt), .auto_anon_out_0_d_ready (_mbus_xbar_auto_anon_out_0_d_ready), .auto_anon_out_0_d_valid (_picker_auto_in_0_d_valid), // @[ProbePicker.scala:69:28] .auto_anon_out_0_d_bits_opcode (_picker_auto_in_0_d_bits_opcode), // @[ProbePicker.scala:69:28] .auto_anon_out_0_d_bits_size (_picker_auto_in_0_d_bits_size), // @[ProbePicker.scala:69:28] .auto_anon_out_0_d_bits_source (_picker_auto_in_0_d_bits_source), // @[ProbePicker.scala:69:28] .auto_anon_out_0_d_bits_denied (_picker_auto_in_0_d_bits_denied), // @[ProbePicker.scala:69:28] .auto_anon_out_0_d_bits_data (_picker_auto_in_0_d_bits_data), // @[ProbePicker.scala:69:28] .auto_anon_out_0_d_bits_corrupt (_picker_auto_in_0_d_bits_corrupt) // @[ProbePicker.scala:69:28] ); // @[MemoryBus.scala:47:32] ProbePicker picker ( // @[ProbePicker.scala:69:28] .clock (childClock), // @[LazyModuleImp.scala:155:31] .reset (childReset), // @[LazyModuleImp.scala:158:31] .auto_in_1_a_ready (_picker_auto_in_1_a_ready), .auto_in_1_a_valid (_mbus_xbar_auto_anon_out_1_a_valid), // @[MemoryBus.scala:47:32] .auto_in_1_a_bits_opcode (_mbus_xbar_auto_anon_out_1_a_bits_opcode), // @[MemoryBus.scala:47:32] .auto_in_1_a_bits_param (_mbus_xbar_auto_anon_out_1_a_bits_param), // @[MemoryBus.scala:47:32] .auto_in_1_a_bits_size (_mbus_xbar_auto_anon_out_1_a_bits_size), // @[MemoryBus.scala:47:32] .auto_in_1_a_bits_source (_mbus_xbar_auto_anon_out_1_a_bits_source), // @[MemoryBus.scala:47:32] .auto_in_1_a_bits_address (_mbus_xbar_auto_anon_out_1_a_bits_address), // @[MemoryBus.scala:47:32] .auto_in_1_a_bits_mask (_mbus_xbar_auto_anon_out_1_a_bits_mask), // @[MemoryBus.scala:47:32] .auto_in_1_a_bits_data (_mbus_xbar_auto_anon_out_1_a_bits_data), // @[MemoryBus.scala:47:32] .auto_in_1_a_bits_corrupt (_mbus_xbar_auto_anon_out_1_a_bits_corrupt), // @[MemoryBus.scala:47:32] .auto_in_1_d_ready (_mbus_xbar_auto_anon_out_1_d_ready), // @[MemoryBus.scala:47:32] .auto_in_1_d_valid (_picker_auto_in_1_d_valid), 
.auto_in_1_d_bits_opcode (_picker_auto_in_1_d_bits_opcode), .auto_in_1_d_bits_param (_picker_auto_in_1_d_bits_param), .auto_in_1_d_bits_size (_picker_auto_in_1_d_bits_size), .auto_in_1_d_bits_source (_picker_auto_in_1_d_bits_source), .auto_in_1_d_bits_sink (_picker_auto_in_1_d_bits_sink), .auto_in_1_d_bits_denied (_picker_auto_in_1_d_bits_denied), .auto_in_1_d_bits_data (_picker_auto_in_1_d_bits_data), .auto_in_1_d_bits_corrupt (_picker_auto_in_1_d_bits_corrupt), .auto_in_0_a_ready (_picker_auto_in_0_a_ready), .auto_in_0_a_valid (_mbus_xbar_auto_anon_out_0_a_valid), // @[MemoryBus.scala:47:32] .auto_in_0_a_bits_opcode (_mbus_xbar_auto_anon_out_0_a_bits_opcode), // @[MemoryBus.scala:47:32] .auto_in_0_a_bits_param (_mbus_xbar_auto_anon_out_0_a_bits_param), // @[MemoryBus.scala:47:32] .auto_in_0_a_bits_size (_mbus_xbar_auto_anon_out_0_a_bits_size), // @[MemoryBus.scala:47:32] .auto_in_0_a_bits_source (_mbus_xbar_auto_anon_out_0_a_bits_source), // @[MemoryBus.scala:47:32] .auto_in_0_a_bits_address (_mbus_xbar_auto_anon_out_0_a_bits_address), // @[MemoryBus.scala:47:32] .auto_in_0_a_bits_mask (_mbus_xbar_auto_anon_out_0_a_bits_mask), // @[MemoryBus.scala:47:32] .auto_in_0_a_bits_data (_mbus_xbar_auto_anon_out_0_a_bits_data), // @[MemoryBus.scala:47:32] .auto_in_0_a_bits_corrupt (_mbus_xbar_auto_anon_out_0_a_bits_corrupt), // @[MemoryBus.scala:47:32] .auto_in_0_d_ready (_mbus_xbar_auto_anon_out_0_d_ready), // @[MemoryBus.scala:47:32] .auto_in_0_d_valid (_picker_auto_in_0_d_valid), .auto_in_0_d_bits_opcode (_picker_auto_in_0_d_bits_opcode), .auto_in_0_d_bits_size (_picker_auto_in_0_d_bits_size), .auto_in_0_d_bits_source (_picker_auto_in_0_d_bits_source), .auto_in_0_d_bits_denied (_picker_auto_in_0_d_bits_denied), .auto_in_0_d_bits_data (_picker_auto_in_0_d_bits_data), .auto_in_0_d_bits_corrupt (_picker_auto_in_0_d_bits_corrupt), .auto_out_1_a_ready (coupler_to_mbusscratchpad00_auto_tl_in_a_ready), // @[LazyModuleImp.scala:138:7] .auto_out_1_a_valid (coupler_to_mbusscratchpad00_auto_tl_in_a_valid), .auto_out_1_a_bits_opcode (coupler_to_mbusscratchpad00_auto_tl_in_a_bits_opcode), .auto_out_1_a_bits_param (coupler_to_mbusscratchpad00_auto_tl_in_a_bits_param), .auto_out_1_a_bits_size (coupler_to_mbusscratchpad00_auto_tl_in_a_bits_size), .auto_out_1_a_bits_source (coupler_to_mbusscratchpad00_auto_tl_in_a_bits_source), .auto_out_1_a_bits_address (coupler_to_mbusscratchpad00_auto_tl_in_a_bits_address), .auto_out_1_a_bits_mask (coupler_to_mbusscratchpad00_auto_tl_in_a_bits_mask), .auto_out_1_a_bits_data (coupler_to_mbusscratchpad00_auto_tl_in_a_bits_data), .auto_out_1_a_bits_corrupt (coupler_to_mbusscratchpad00_auto_tl_in_a_bits_corrupt), .auto_out_1_d_ready (coupler_to_mbusscratchpad00_auto_tl_in_d_ready), .auto_out_1_d_valid (coupler_to_mbusscratchpad00_auto_tl_in_d_valid), // @[LazyModuleImp.scala:138:7] .auto_out_1_d_bits_opcode (coupler_to_mbusscratchpad00_auto_tl_in_d_bits_opcode), // @[LazyModuleImp.scala:138:7] .auto_out_1_d_bits_param (coupler_to_mbusscratchpad00_auto_tl_in_d_bits_param), // @[LazyModuleImp.scala:138:7] .auto_out_1_d_bits_size (coupler_to_mbusscratchpad00_auto_tl_in_d_bits_size), // @[LazyModuleImp.scala:138:7] .auto_out_1_d_bits_source (coupler_to_mbusscratchpad00_auto_tl_in_d_bits_source), // @[LazyModuleImp.scala:138:7] .auto_out_1_d_bits_sink (coupler_to_mbusscratchpad00_auto_tl_in_d_bits_sink), // @[LazyModuleImp.scala:138:7] .auto_out_1_d_bits_denied (coupler_to_mbusscratchpad00_auto_tl_in_d_bits_denied), // @[LazyModuleImp.scala:138:7] .auto_out_1_d_bits_data 
(coupler_to_mbusscratchpad00_auto_tl_in_d_bits_data), // @[LazyModuleImp.scala:138:7] .auto_out_1_d_bits_corrupt (coupler_to_mbusscratchpad00_auto_tl_in_d_bits_corrupt), // @[LazyModuleImp.scala:138:7] .auto_out_0_a_ready (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_a_ready), // @[LazyScope.scala:98:27] .auto_out_0_a_valid (_picker_auto_out_0_a_valid), .auto_out_0_a_bits_opcode (_picker_auto_out_0_a_bits_opcode), .auto_out_0_a_bits_param (_picker_auto_out_0_a_bits_param), .auto_out_0_a_bits_size (_picker_auto_out_0_a_bits_size), .auto_out_0_a_bits_source (_picker_auto_out_0_a_bits_source), .auto_out_0_a_bits_address (_picker_auto_out_0_a_bits_address), .auto_out_0_a_bits_mask (_picker_auto_out_0_a_bits_mask), .auto_out_0_a_bits_data (_picker_auto_out_0_a_bits_data), .auto_out_0_a_bits_corrupt (_picker_auto_out_0_a_bits_corrupt), .auto_out_0_d_ready (_picker_auto_out_0_d_ready), .auto_out_0_d_valid (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_valid), // @[LazyScope.scala:98:27] .auto_out_0_d_bits_opcode (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_opcode), // @[LazyScope.scala:98:27] .auto_out_0_d_bits_size (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_size), // @[LazyScope.scala:98:27] .auto_out_0_d_bits_source (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_source), // @[LazyScope.scala:98:27] .auto_out_0_d_bits_denied (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_denied), // @[LazyScope.scala:98:27] .auto_out_0_d_bits_data (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_data), // @[LazyScope.scala:98:27] .auto_out_0_d_bits_corrupt (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_corrupt) // @[LazyScope.scala:98:27] ); // @[ProbePicker.scala:69:28] TLInterconnectCoupler_mbus_to_memory_controller_port_named_tl_mem coupler_to_memory_controller_port_named_tl_mem ( // @[LazyScope.scala:98:27] .clock (childClock), // @[LazyModuleImp.scala:155:31] .reset (childReset) // @[LazyModuleImp.scala:158:31] ); // @[LazyScope.scala:98:27] TLInterconnectCoupler_mbus_to_memory_controller_port_named_axi4 coupler_to_memory_controller_port_named_axi4 ( // @[LazyScope.scala:98:27] .clock (childClock), // @[LazyModuleImp.scala:155:31] .reset (childReset), // @[LazyModuleImp.scala:158:31] .auto_widget_anon_in_a_ready (xbar_auto_anon_out_a_ready), .auto_widget_anon_in_a_valid (xbar_auto_anon_out_a_valid), // @[Xbar.scala:74:9] .auto_widget_anon_in_a_bits_opcode (xbar_auto_anon_out_a_bits_opcode), // @[Xbar.scala:74:9] .auto_widget_anon_in_a_bits_param (xbar_auto_anon_out_a_bits_param), // @[Xbar.scala:74:9] .auto_widget_anon_in_a_bits_size (xbar_auto_anon_out_a_bits_size), // @[Xbar.scala:74:9] .auto_widget_anon_in_a_bits_source (xbar_auto_anon_out_a_bits_source), // @[Xbar.scala:74:9] .auto_widget_anon_in_a_bits_address (xbar_auto_anon_out_a_bits_address), // @[Xbar.scala:74:9] .auto_widget_anon_in_a_bits_mask (xbar_auto_anon_out_a_bits_mask), // @[Xbar.scala:74:9] .auto_widget_anon_in_a_bits_data (xbar_auto_anon_out_a_bits_data), // @[Xbar.scala:74:9] .auto_widget_anon_in_a_bits_corrupt (xbar_auto_anon_out_a_bits_corrupt), // @[Xbar.scala:74:9] .auto_widget_anon_in_d_ready (xbar_auto_anon_out_d_ready), // @[Xbar.scala:74:9] .auto_widget_anon_in_d_valid (xbar_auto_anon_out_d_valid), .auto_widget_anon_in_d_bits_opcode (xbar_auto_anon_out_d_bits_opcode), .auto_widget_anon_in_d_bits_size (xbar_auto_anon_out_d_bits_size), .auto_widget_anon_in_d_bits_source 
(xbar_auto_anon_out_d_bits_source), .auto_widget_anon_in_d_bits_denied (xbar_auto_anon_out_d_bits_denied), .auto_widget_anon_in_d_bits_data (xbar_auto_anon_out_d_bits_data), .auto_widget_anon_in_d_bits_corrupt (xbar_auto_anon_out_d_bits_corrupt), .auto_axi4yank_out_aw_ready (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_ready_0), // @[ClockDomain.scala:14:9] .auto_axi4yank_out_aw_valid (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_valid_0), .auto_axi4yank_out_aw_bits_id (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_id_0), .auto_axi4yank_out_aw_bits_addr (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_addr_0), .auto_axi4yank_out_aw_bits_len (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_len_0), .auto_axi4yank_out_aw_bits_size (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_size_0), .auto_axi4yank_out_aw_bits_burst (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_burst_0), .auto_axi4yank_out_aw_bits_lock (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_lock_0), .auto_axi4yank_out_aw_bits_cache (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_cache_0), .auto_axi4yank_out_aw_bits_prot (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_prot_0), .auto_axi4yank_out_aw_bits_qos (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_qos_0), .auto_axi4yank_out_w_ready (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_ready_0), // @[ClockDomain.scala:14:9] .auto_axi4yank_out_w_valid (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_valid_0), .auto_axi4yank_out_w_bits_data (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_bits_data_0), .auto_axi4yank_out_w_bits_strb (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_bits_strb_0), .auto_axi4yank_out_w_bits_last (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_bits_last_0), .auto_axi4yank_out_b_ready (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_ready_0), .auto_axi4yank_out_b_valid (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_valid_0), // @[ClockDomain.scala:14:9] .auto_axi4yank_out_b_bits_id (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_bits_id_0), // @[ClockDomain.scala:14:9] .auto_axi4yank_out_b_bits_resp (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_bits_resp_0), // @[ClockDomain.scala:14:9] .auto_axi4yank_out_ar_ready (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_ready_0), // @[ClockDomain.scala:14:9] .auto_axi4yank_out_ar_valid (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_valid_0), .auto_axi4yank_out_ar_bits_id (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_id_0), .auto_axi4yank_out_ar_bits_addr (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_addr_0), .auto_axi4yank_out_ar_bits_len (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_len_0), .auto_axi4yank_out_ar_bits_size (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_size_0), .auto_axi4yank_out_ar_bits_burst (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_burst_0), .auto_axi4yank_out_ar_bits_lock (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_lock_0), .auto_axi4yank_out_ar_bits_cache 
(auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_cache_0), .auto_axi4yank_out_ar_bits_prot (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_prot_0), .auto_axi4yank_out_ar_bits_qos (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_qos_0), .auto_axi4yank_out_r_ready (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_ready_0), .auto_axi4yank_out_r_valid (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_valid_0), // @[ClockDomain.scala:14:9] .auto_axi4yank_out_r_bits_id (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_id_0), // @[ClockDomain.scala:14:9] .auto_axi4yank_out_r_bits_data (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_data_0), // @[ClockDomain.scala:14:9] .auto_axi4yank_out_r_bits_resp (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_resp_0), // @[ClockDomain.scala:14:9] .auto_axi4yank_out_r_bits_last (auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_bits_last_0), // @[ClockDomain.scala:14:9] .auto_tl_in_a_ready (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_a_ready), .auto_tl_in_a_valid (_picker_auto_out_0_a_valid), // @[ProbePicker.scala:69:28] .auto_tl_in_a_bits_opcode (_picker_auto_out_0_a_bits_opcode), // @[ProbePicker.scala:69:28] .auto_tl_in_a_bits_param (_picker_auto_out_0_a_bits_param), // @[ProbePicker.scala:69:28] .auto_tl_in_a_bits_size (_picker_auto_out_0_a_bits_size), // @[ProbePicker.scala:69:28] .auto_tl_in_a_bits_source (_picker_auto_out_0_a_bits_source), // @[ProbePicker.scala:69:28] .auto_tl_in_a_bits_address (_picker_auto_out_0_a_bits_address), // @[ProbePicker.scala:69:28] .auto_tl_in_a_bits_mask (_picker_auto_out_0_a_bits_mask), // @[ProbePicker.scala:69:28] .auto_tl_in_a_bits_data (_picker_auto_out_0_a_bits_data), // @[ProbePicker.scala:69:28] .auto_tl_in_a_bits_corrupt (_picker_auto_out_0_a_bits_corrupt), // @[ProbePicker.scala:69:28] .auto_tl_in_d_ready (_picker_auto_out_0_d_ready), // @[ProbePicker.scala:69:28] .auto_tl_in_d_valid (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_valid), .auto_tl_in_d_bits_opcode (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_opcode), .auto_tl_in_d_bits_size (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_size), .auto_tl_in_d_bits_source (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_source), .auto_tl_in_d_bits_denied (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_denied), .auto_tl_in_d_bits_data (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_data), .auto_tl_in_d_bits_corrupt (_coupler_to_memory_controller_port_named_axi4_auto_tl_in_d_bits_corrupt), .auto_tl_out_a_ready (xbar_auto_anon_in_a_ready), // @[Xbar.scala:74:9] .auto_tl_out_a_valid (xbar_auto_anon_in_a_valid), .auto_tl_out_a_bits_opcode (xbar_auto_anon_in_a_bits_opcode), .auto_tl_out_a_bits_param (xbar_auto_anon_in_a_bits_param), .auto_tl_out_a_bits_size (xbar_auto_anon_in_a_bits_size), .auto_tl_out_a_bits_source (xbar_auto_anon_in_a_bits_source), .auto_tl_out_a_bits_address (xbar_auto_anon_in_a_bits_address), .auto_tl_out_a_bits_mask (xbar_auto_anon_in_a_bits_mask), .auto_tl_out_a_bits_data (xbar_auto_anon_in_a_bits_data), .auto_tl_out_a_bits_corrupt (xbar_auto_anon_in_a_bits_corrupt), .auto_tl_out_d_ready (xbar_auto_anon_in_d_ready), .auto_tl_out_d_valid (xbar_auto_anon_in_d_valid), // @[Xbar.scala:74:9] .auto_tl_out_d_bits_opcode (xbar_auto_anon_in_d_bits_opcode), // 
@[Xbar.scala:74:9] .auto_tl_out_d_bits_size (xbar_auto_anon_in_d_bits_size), // @[Xbar.scala:74:9] .auto_tl_out_d_bits_source (xbar_auto_anon_in_d_bits_source), // @[Xbar.scala:74:9] .auto_tl_out_d_bits_denied (xbar_auto_anon_in_d_bits_denied), // @[Xbar.scala:74:9] .auto_tl_out_d_bits_data (xbar_auto_anon_in_d_bits_data), // @[Xbar.scala:74:9] .auto_tl_out_d_bits_corrupt (xbar_auto_anon_in_d_bits_corrupt) // @[Xbar.scala:74:9] ); // @[LazyScope.scala:98:27] TLBuffer_a28d64s6k1z3u buffer_1 ( // @[Buffer.scala:75:28] .clock (childClock), // @[LazyModuleImp.scala:155:31] .reset (childReset), // @[LazyModuleImp.scala:158:31] .auto_in_a_ready (coupler_to_mbusscratchpad00_auto_tl_out_a_ready), .auto_in_a_valid (coupler_to_mbusscratchpad00_auto_tl_out_a_valid), // @[LazyModuleImp.scala:138:7] .auto_in_a_bits_opcode (coupler_to_mbusscratchpad00_auto_tl_out_a_bits_opcode), // @[LazyModuleImp.scala:138:7] .auto_in_a_bits_param (coupler_to_mbusscratchpad00_auto_tl_out_a_bits_param), // @[LazyModuleImp.scala:138:7] .auto_in_a_bits_size (coupler_to_mbusscratchpad00_auto_tl_out_a_bits_size), // @[LazyModuleImp.scala:138:7] .auto_in_a_bits_source (coupler_to_mbusscratchpad00_auto_tl_out_a_bits_source), // @[LazyModuleImp.scala:138:7] .auto_in_a_bits_address (coupler_to_mbusscratchpad00_auto_tl_out_a_bits_address), // @[LazyModuleImp.scala:138:7] .auto_in_a_bits_mask (coupler_to_mbusscratchpad00_auto_tl_out_a_bits_mask), // @[LazyModuleImp.scala:138:7] .auto_in_a_bits_data (coupler_to_mbusscratchpad00_auto_tl_out_a_bits_data), // @[LazyModuleImp.scala:138:7] .auto_in_a_bits_corrupt (coupler_to_mbusscratchpad00_auto_tl_out_a_bits_corrupt), // @[LazyModuleImp.scala:138:7] .auto_in_d_ready (coupler_to_mbusscratchpad00_auto_tl_out_d_ready), // @[LazyModuleImp.scala:138:7] .auto_in_d_valid (coupler_to_mbusscratchpad00_auto_tl_out_d_valid), .auto_in_d_bits_opcode (coupler_to_mbusscratchpad00_auto_tl_out_d_bits_opcode), .auto_in_d_bits_param (coupler_to_mbusscratchpad00_auto_tl_out_d_bits_param), .auto_in_d_bits_size (coupler_to_mbusscratchpad00_auto_tl_out_d_bits_size), .auto_in_d_bits_source (coupler_to_mbusscratchpad00_auto_tl_out_d_bits_source), .auto_in_d_bits_sink (coupler_to_mbusscratchpad00_auto_tl_out_d_bits_sink), .auto_in_d_bits_denied (coupler_to_mbusscratchpad00_auto_tl_out_d_bits_denied), .auto_in_d_bits_data (coupler_to_mbusscratchpad00_auto_tl_out_d_bits_data), .auto_in_d_bits_corrupt (coupler_to_mbusscratchpad00_auto_tl_out_d_bits_corrupt), .auto_out_a_ready (auto_buffer_out_a_ready_0), // @[ClockDomain.scala:14:9] .auto_out_a_valid (auto_buffer_out_a_valid_0), .auto_out_a_bits_opcode (auto_buffer_out_a_bits_opcode_0), .auto_out_a_bits_param (auto_buffer_out_a_bits_param_0), .auto_out_a_bits_size (auto_buffer_out_a_bits_size_0), .auto_out_a_bits_source (auto_buffer_out_a_bits_source_0), .auto_out_a_bits_address (auto_buffer_out_a_bits_address_0), .auto_out_a_bits_mask (auto_buffer_out_a_bits_mask_0), .auto_out_a_bits_data (auto_buffer_out_a_bits_data_0), .auto_out_a_bits_corrupt (auto_buffer_out_a_bits_corrupt_0), .auto_out_d_ready (auto_buffer_out_d_ready_0), .auto_out_d_valid (auto_buffer_out_d_valid_0), // @[ClockDomain.scala:14:9] .auto_out_d_bits_opcode (auto_buffer_out_d_bits_opcode_0), // @[ClockDomain.scala:14:9] .auto_out_d_bits_param (auto_buffer_out_d_bits_param_0), // @[ClockDomain.scala:14:9] .auto_out_d_bits_size (auto_buffer_out_d_bits_size_0), // @[ClockDomain.scala:14:9] .auto_out_d_bits_source (auto_buffer_out_d_bits_source_0), // @[ClockDomain.scala:14:9] 
.auto_out_d_bits_sink (auto_buffer_out_d_bits_sink_0), // @[ClockDomain.scala:14:9] .auto_out_d_bits_denied (auto_buffer_out_d_bits_denied_0), // @[ClockDomain.scala:14:9] .auto_out_d_bits_data (auto_buffer_out_d_bits_data_0), // @[ClockDomain.scala:14:9] .auto_out_d_bits_corrupt (auto_buffer_out_d_bits_corrupt_0) // @[ClockDomain.scala:14:9] ); // @[Buffer.scala:75:28] assign auto_buffer_out_a_valid = auto_buffer_out_a_valid_0; // @[ClockDomain.scala:14:9] assign auto_buffer_out_a_bits_opcode = auto_buffer_out_a_bits_opcode_0; // @[ClockDomain.scala:14:9] assign auto_buffer_out_a_bits_param = auto_buffer_out_a_bits_param_0; // @[ClockDomain.scala:14:9] assign auto_buffer_out_a_bits_size = auto_buffer_out_a_bits_size_0; // @[ClockDomain.scala:14:9] assign auto_buffer_out_a_bits_source = auto_buffer_out_a_bits_source_0; // @[ClockDomain.scala:14:9] assign auto_buffer_out_a_bits_address = auto_buffer_out_a_bits_address_0; // @[ClockDomain.scala:14:9] assign auto_buffer_out_a_bits_mask = auto_buffer_out_a_bits_mask_0; // @[ClockDomain.scala:14:9] assign auto_buffer_out_a_bits_data = auto_buffer_out_a_bits_data_0; // @[ClockDomain.scala:14:9] assign auto_buffer_out_a_bits_corrupt = auto_buffer_out_a_bits_corrupt_0; // @[ClockDomain.scala:14:9] assign auto_buffer_out_d_ready = auto_buffer_out_d_ready_0; // @[ClockDomain.scala:14:9] assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_valid = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_valid_0; // @[ClockDomain.scala:14:9] assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_id = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_id_0; // @[ClockDomain.scala:14:9] assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_addr = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_addr_0; // @[ClockDomain.scala:14:9] assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_len = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_len_0; // @[ClockDomain.scala:14:9] assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_size = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_size_0; // @[ClockDomain.scala:14:9] assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_burst = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_burst_0; // @[ClockDomain.scala:14:9] assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_lock = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_lock_0; // @[ClockDomain.scala:14:9] assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_cache = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_cache_0; // @[ClockDomain.scala:14:9] assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_prot = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_prot_0; // @[ClockDomain.scala:14:9] assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_qos = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_aw_bits_qos_0; // @[ClockDomain.scala:14:9] assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_valid = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_valid_0; // @[ClockDomain.scala:14:9] assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_bits_data = 
auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_bits_data_0; // @[ClockDomain.scala:14:9] assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_bits_strb = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_bits_strb_0; // @[ClockDomain.scala:14:9] assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_bits_last = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_w_bits_last_0; // @[ClockDomain.scala:14:9] assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_ready = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_b_ready_0; // @[ClockDomain.scala:14:9] assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_valid = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_valid_0; // @[ClockDomain.scala:14:9] assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_id = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_id_0; // @[ClockDomain.scala:14:9] assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_addr = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_addr_0; // @[ClockDomain.scala:14:9] assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_len = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_len_0; // @[ClockDomain.scala:14:9] assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_size = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_size_0; // @[ClockDomain.scala:14:9] assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_burst = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_burst_0; // @[ClockDomain.scala:14:9] assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_lock = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_lock_0; // @[ClockDomain.scala:14:9] assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_cache = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_cache_0; // @[ClockDomain.scala:14:9] assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_prot = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_prot_0; // @[ClockDomain.scala:14:9] assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_qos = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_ar_bits_qos_0; // @[ClockDomain.scala:14:9] assign auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_ready = auto_coupler_to_memory_controller_port_named_axi4_axi4yank_out_r_ready_0; // @[ClockDomain.scala:14:9] assign auto_fixedClockNode_anon_out_1_clock = auto_fixedClockNode_anon_out_1_clock_0; // @[ClockDomain.scala:14:9] assign auto_fixedClockNode_anon_out_1_reset = auto_fixedClockNode_anon_out_1_reset_0; // @[ClockDomain.scala:14:9] assign auto_fixedClockNode_anon_out_0_clock = auto_fixedClockNode_anon_out_0_clock_0; // @[ClockDomain.scala:14:9] assign auto_fixedClockNode_anon_out_0_reset = auto_fixedClockNode_anon_out_0_reset_0; // @[ClockDomain.scala:14:9] assign auto_bus_xing_in_a_ready = auto_bus_xing_in_a_ready_0; // @[ClockDomain.scala:14:9] assign auto_bus_xing_in_d_valid = auto_bus_xing_in_d_valid_0; // @[ClockDomain.scala:14:9] assign auto_bus_xing_in_d_bits_opcode = auto_bus_xing_in_d_bits_opcode_0; // @[ClockDomain.scala:14:9] assign auto_bus_xing_in_d_bits_param 
= auto_bus_xing_in_d_bits_param_0; // @[ClockDomain.scala:14:9] assign auto_bus_xing_in_d_bits_size = auto_bus_xing_in_d_bits_size_0; // @[ClockDomain.scala:14:9] assign auto_bus_xing_in_d_bits_source = auto_bus_xing_in_d_bits_source_0; // @[ClockDomain.scala:14:9] assign auto_bus_xing_in_d_bits_sink = auto_bus_xing_in_d_bits_sink_0; // @[ClockDomain.scala:14:9] assign auto_bus_xing_in_d_bits_denied = auto_bus_xing_in_d_bits_denied_0; // @[ClockDomain.scala:14:9] assign auto_bus_xing_in_d_bits_data = auto_bus_xing_in_d_bits_data_0; // @[ClockDomain.scala:14:9] assign auto_bus_xing_in_d_bits_corrupt = auto_bus_xing_in_d_bits_corrupt_0; // @[ClockDomain.scala:14:9] endmodule
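The always block in the module above tracks, for every source ID, whether a request is still in flight: each flight bit is set on the first beat of an accepted A-channel request with that source and cleared on the first beat of the matching D-channel response. A minimal Chisel sketch of that pattern (my own module and signal names, not the rocket-chip FIFOFixer source):

import chisel3._
import chisel3.util._

class FlightTracker(numSources: Int) extends Module {
  require(numSources > 1)
  val io = IO(new Bundle {
    val a_fire_first = Input(Bool())                     // first beat of an accepted A request
    val a_source     = Input(UInt(log2Up(numSources).W))
    val d_fire_first = Input(Bool())                     // first beat of an accepted D response
    val d_source     = Input(UInt(log2Up(numSources).W))
    val flight       = Output(Vec(numSources, Bool()))
  })

  val flight = RegInit(VecInit(Seq.fill(numSources)(false.B)))
  for (i <- 0 until numSources) {
    val set   = io.a_fire_first && io.a_source === i.U
    val clear = io.d_fire_first && io.d_source === i.U
    // Same shape as the generated Verilog: flight_i <= ~clear & (set | flight_i)
    flight(i) := !clear && (set || flight(i))
  }
  io.flight := flight
}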
Generate the Verilog code corresponding to the following Chisel files. File PE.scala: // See README.md for license details. package gemmini import chisel3._ import chisel3.util._ class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle { val dataflow = UInt(1.W) // TODO make this an Enum val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)? val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats } class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module { import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(inputType) val in_c = Input(cType) val out_d = Output(dType) }) io.out_d := io.in_c.mac(io.in_a, io.in_b) } // TODO update documentation /** * A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh. * @param width Data width of operands */ class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int) (implicit ev: Arithmetic[T]) extends Module { // Debugging variables import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(outputType) val in_d = Input(outputType) val out_a = Output(inputType) val out_b = Output(outputType) val out_c = Output(outputType) val in_control = Input(new PEControl(accType)) val out_control = Output(new PEControl(accType)) val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W)) val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W)) val in_last = Input(Bool()) val out_last = Output(Bool()) val in_valid = Input(Bool()) val out_valid = Output(Bool()) val bad_dataflow = Output(Bool()) }) val cType = if (df == Dataflow.WS) inputType else accType // When creating PEs that support multiple dataflows, the // elaboration/synthesis tools often fail to consolidate and de-duplicate // MAC units. To force mac circuitry to be re-used, we create a "mac_unit" // module here which just performs a single MAC operation val mac_unit = Module(new MacUnit(inputType, if (df == Dataflow.WS) outputType else accType, outputType)) val a = io.in_a val b = io.in_b val d = io.in_d val c1 = Reg(cType) val c2 = Reg(cType) val dataflow = io.in_control.dataflow val prop = io.in_control.propagate val shift = io.in_control.shift val id = io.in_id val last = io.in_last val valid = io.in_valid io.out_a := a io.out_control.dataflow := dataflow io.out_control.propagate := prop io.out_control.shift := shift io.out_id := id io.out_last := last io.out_valid := valid mac_unit.io.in_a := a val last_s = RegEnable(prop, valid) val flip = last_s =/= prop val shift_offset = Mux(flip, shift, 0.U) // Which dataflow are we using? val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W) val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W) // Is c1 being computed on, or propagated forward (in the output-stationary dataflow)? 
val COMPUTE = 0.U(1.W) val PROPAGATE = 1.U(1.W) io.bad_dataflow := false.B when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 c2 := mac_unit.io.out_d c1 := d.withWidthOf(cType) }.otherwise { io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c1 c1 := mac_unit.io.out_d c2 := d.withWidthOf(cType) } }.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := c1 mac_unit.io.in_b := c2.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c1 := d }.otherwise { io.out_c := c2 mac_unit.io.in_b := c1.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c2 := d } }.otherwise { io.bad_dataflow := true.B //assert(false.B, "unknown dataflow") io.out_c := DontCare io.out_b := DontCare mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 } when (!valid) { c1 := c1 c2 := c2 mac_unit.io.in_b := DontCare mac_unit.io.in_c := DontCare } } File Arithmetic.scala: // A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own: // implicit MyTypeArithmetic extends Arithmetic[MyType] { ... } package gemmini import chisel3._ import chisel3.util._ import hardfloat._ // Bundles that represent the raw bits of custom datatypes case class Float(expWidth: Int, sigWidth: Int) extends Bundle { val bits = UInt((expWidth + sigWidth).W) val bias: Int = (1 << (expWidth-1)) - 1 } case class DummySInt(w: Int) extends Bundle { val bits = UInt(w.W) def dontCare: DummySInt = { val o = Wire(new DummySInt(w)) o.bits := 0.U o } } // The Arithmetic typeclass which implements various arithmetic operations on custom datatypes abstract class Arithmetic[T <: Data] { implicit def cast(t: T): ArithmeticOps[T] } abstract class ArithmeticOps[T <: Data](self: T) { def *(t: T): T def mac(m1: T, m2: T): T // Returns (m1 * m2 + self) def +(t: T): T def -(t: T): T def >>(u: UInt): T // This is a rounding shift! Rounds away from 0 def >(t: T): Bool def identity: T def withWidthOf(t: T): T def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates def relu: T def zero: T def minimum: T // Optional parameters, which only need to be defined if you want to enable various optimizations for transformers def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None def mult_with_reciprocal[U <: Data](reciprocal: U) = self } object Arithmetic { implicit object UIntArithmetic extends Arithmetic[UInt] { override implicit def cast(self: UInt) = new ArithmeticOps(self) { override def *(t: UInt) = self * t override def mac(m1: UInt, m2: UInt) = m1 * m2 + self override def +(t: UInt) = self + t override def -(t: UInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? 
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = point_five & (zeros | ones_digit) (self >> u).asUInt + r } override def >(t: UInt): Bool = self > t override def withWidthOf(t: UInt) = self.asTypeOf(t) override def clippedToWidthOf(t: UInt) = { val sat = ((1 << (t.getWidth-1))-1).U Mux(self > sat, sat, self)(t.getWidth-1, 0) } override def relu: UInt = self override def zero: UInt = 0.U override def identity: UInt = 1.U override def minimum: UInt = 0.U } } implicit object SIntArithmetic extends Arithmetic[SInt] { override implicit def cast(self: SInt) = new ArithmeticOps(self) { override def *(t: SInt) = self * t override def mac(m1: SInt, m2: SInt) = m1 * m2 + self override def +(t: SInt) = self + t override def -(t: SInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = (point_five & (zeros | ones_digit)).asBool (self >> u).asSInt + Mux(r, 1.S, 0.S) } override def >(t: SInt): Bool = self > t override def withWidthOf(t: SInt) = { if (self.getWidth >= t.getWidth) self(t.getWidth-1, 0).asSInt else { val sign_bits = t.getWidth - self.getWidth val sign = self(self.getWidth-1) Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t) } } override def clippedToWidthOf(t: SInt): SInt = { val maxsat = ((1 << (t.getWidth-1))-1).S val minsat = (-(1 << (t.getWidth-1))).S MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt } override def relu: SInt = Mux(self >= 0.S, self, 0.S) override def zero: SInt = 0.S override def identity: SInt = 1.S override def minimum: SInt = (-(1 << (self.getWidth-1))).S override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(denom_t.cloneType)) val output = Wire(Decoupled(self.cloneType)) // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def sin_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def uin_to_float(x: UInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := x in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = 
sin_to_float(self) val denom_rec = uin_to_float(input.bits) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := self_rec divider.io.b := denom_rec divider.io.roundingMode := consts.round_minMag divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := float_to_in(divider.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(self.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) // Instantiate the hardloat sqrt val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0)) input.ready := sqrter.io.inReady sqrter.io.inValid := input.valid sqrter.io.sqrtOp := true.B sqrter.io.a := self_rec sqrter.io.b := DontCare sqrter.io.roundingMode := consts.round_minMag sqrter.io.detectTininess := consts.tininess_afterRounding output.valid := sqrter.io.outValid_sqrt output.bits := float_to_in(sqrter.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match { case Float(expWidth, sigWidth) => val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(u.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } val self_rec = in_to_float(self) val one_rec = in_to_float(1.S) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := one_rec divider.io.b := self_rec divider.io.roundingMode := consts.round_near_even divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u) assert(!output.valid || output.ready) Some((input, output)) case _ => None } override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match 
{ case recip @ Float(expWidth, sigWidth) => def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits) // Instantiate the hardloat divider val muladder = Module(new MulRecFN(expWidth, sigWidth)) muladder.io.roundingMode := consts.round_near_even muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := reciprocal_rec float_to_in(muladder.io.out) case _ => self } } } implicit object FloatArithmetic extends Arithmetic[Float] { // TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) { override def *(t: Float): Float = { val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := t_rec_resized val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def mac(m1: Float, m2: Float): Float = { // Recode all operands val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits) val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize m1 to self's width val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth)) m1_resizer.io.in := m1_rec m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m1_resizer.io.detectTininess := consts.tininess_afterRounding val m1_rec_resized = m1_resizer.io.out // Resize m2 to self's width val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth)) m2_resizer.io.in := m2_rec m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m2_resizer.io.detectTininess := consts.tininess_afterRounding val m2_rec_resized = m2_resizer.io.out // Perform multiply-add val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := m1_rec_resized muladder.io.b := m2_rec_resized muladder.io.c := self_rec // Convert result 
to standard format // TODO remove these intermediate recodings val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def +(t: Float): Float = { require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Generate 1 as a float val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := 1.U in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding val one_rec = in_to_rec_fn.io.out // Resize t val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out // Perform addition val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := t_rec_resized muladder.io.b := one_rec muladder.io.c := self_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def -(t: Float): Float = { val t_sgn = t.bits(t.getWidth-1) val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t) self + neg_t } override def >>(u: UInt): Float = { // Recode self val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Get 2^(-u) as a recoded float val shift_exp = Wire(UInt(self.expWidth.W)) shift_exp := self.bias.U - u val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W)) val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn) assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported") // Multiply self and 2^(-u) val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := shift_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def >(t: Float): Bool = { // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize t to self's width val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth)) comparator.io.a := self_rec comparator.io.b := t_rec_resized comparator.io.signaling := false.B comparator.io.gt } override def withWidthOf(t: Float): Float = { val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even 
// consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def clippedToWidthOf(t: Float): Float = { // TODO check for overflow. Right now, we just assume that overflow doesn't happen val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def relu: Float = { val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits) val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits) result } override def zero: Float = 0.U.asTypeOf(self) override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) } } implicit object DummySIntArithmetic extends Arithmetic[DummySInt] { override implicit def cast(self: DummySInt) = new ArithmeticOps(self) { override def *(t: DummySInt) = self.dontCare override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare override def +(t: DummySInt) = self.dontCare override def -(t: DummySInt) = self.dontCare override def >>(t: UInt) = self.dontCare override def >(t: DummySInt): Bool = false.B override def identity = self.dontCare override def withWidthOf(t: DummySInt) = self.dontCare override def clippedToWidthOf(t: DummySInt) = self.dontCare override def relu = self.dontCare override def zero = self.dontCare override def minimum: DummySInt = self.dontCare } } }
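The rounding right-shift (>>) defined in Arithmetic.scala above follows the RVV fixed-point rounding formula cited in its comment: the result is the truncated shift plus a rounding bit r built from the bit just below the cut, the lower discarded bits, and the LSB of the truncated result. A plain-Scala software model of that term, for reference only (names are mine; this is not part of the prompt's Chisel):

def roundingShift(x: BigInt, u: Int): BigInt = {
  if (u == 0) x
  else {
    val pointFive = x.testBit(u - 1)                         // bit just below the cut
    val zeros     = (x & ((BigInt(1) << (u - 1)) - 1)) != 0  // any lower discarded bits set
    val onesDigit = x.testBit(u)                             // LSB of the truncated result
    val r         = pointFive && (zeros || onesDigit)
    (x >> u) + (if (r) 1 else 0)
  }
}

// roundingShift(6, 2) == 2   (6/4 = 1.5  rounds up)
// roundingShift(5, 2) == 1   (5/4 = 1.25 rounds down)
// roundingShift(2, 2) == 0   (2/4 = 0.5  ties keep the even result)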
module MacUnit_167( // @[PE.scala:14:7]
  input         clock, // @[PE.scala:14:7]
  input         reset, // @[PE.scala:14:7]
  input  [7:0]  io_in_a, // @[PE.scala:16:14]
  input  [7:0]  io_in_b, // @[PE.scala:16:14]
  input  [19:0] io_in_c, // @[PE.scala:16:14]
  output [19:0] io_out_d // @[PE.scala:16:14]
);

  wire [7:0]  io_in_a_0 = io_in_a; // @[PE.scala:14:7]
  wire [7:0]  io_in_b_0 = io_in_b; // @[PE.scala:14:7]
  wire [19:0] io_in_c_0 = io_in_c; // @[PE.scala:14:7]
  wire [19:0] _io_out_d_T_3; // @[Arithmetic.scala:93:54]
  wire [19:0] io_out_d_0; // @[PE.scala:14:7]
  wire [15:0] _io_out_d_T = {{8{io_in_a_0[7]}}, io_in_a_0} * {{8{io_in_b_0[7]}}, io_in_b_0}; // @[PE.scala:14:7]
  wire [20:0] _io_out_d_T_1 = {{5{_io_out_d_T[15]}}, _io_out_d_T} + {io_in_c_0[19], io_in_c_0}; // @[PE.scala:14:7]
  wire [19:0] _io_out_d_T_2 = _io_out_d_T_1[19:0]; // @[Arithmetic.scala:93:54]
  assign _io_out_d_T_3 = _io_out_d_T_2; // @[Arithmetic.scala:93:54]
  assign io_out_d_0 = _io_out_d_T_3; // @[PE.scala:14:7]
  assign io_out_d = io_out_d_0; // @[PE.scala:14:7]
endmodule
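Editor's note: the generated MacUnit_167 above is a purely combinational signed multiply-accumulate; it sign-extends the two 8-bit operands, multiplies them, adds the 20-bit accumulator input, and keeps the low 20 bits of the sum. A minimal stand-alone Chisel sketch of the same datapath is shown below; the module and port names (SimpleMac, in_a, ...) are illustrative only and are not part of the sources or the generator output.

// Hedged sketch: a stand-alone signed MAC with the same widths as MacUnit_167.
// Not part of the upstream sources; names are made up for illustration.
import chisel3._

class SimpleMac extends Module {
  val io = IO(new Bundle {
    val in_a  = Input(SInt(8.W))
    val in_b  = Input(SInt(8.W))
    val in_c  = Input(SInt(20.W))
    val out_d = Output(SInt(20.W))
  })
  // in_a * in_b is 16 bits; the non-widening + keeps the max operand width (20 bits),
  // which matches the [19:0] truncation of _io_out_d_T_1 in the Verilog above.
  io.out_d := io.in_a * io.in_b + io.in_c
}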
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
  require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
  override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"

  val output = Seq.tabulate(w) { i =>
    val initBit = ((init >> i) & 1) > 0
    withReset(reset.asBool){
      SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
    }
  }
  io.q := Cat(output.reverse)
}

object SyncResetSynchronizerShiftReg {
  def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
    if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)

  def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
    apply (in, sync, 0, name)

  def apply [T <: Data](in: T, sync: Int): T =
    apply (in, sync, 0, None)

  def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
    apply(in, sync, init.litValue.toInt, name)

  def apply [T <: Data](in: T, sync: Int, init: T): T =
    apply (in, sync, init.litValue.toInt, None)
}

class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
  require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
  override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"

  val output = Seq.tabulate(w) { i =>
    val initBit = ((init >> i) & 1) > 0
    SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
  }
  io.q := Cat(output.reverse)
}

object ResetSynchronizerShiftReg {
  def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
    AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)

  def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
    apply (in, sync, 0, name)

  def apply [T <: Data](in: T, sync: Int): T =
    apply (in, sync, 0, None)

  def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
    apply(in, sync, init.litValue.toInt, name)

  def apply [T <: Data](in: T, sync: Int, init: T): T =
    apply (in, sync, init.litValue.toInt, None)
}

class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
  require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
  override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"

  val output = Seq.tabulate(w) { i =>
    SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
  }
  io.q := Cat(output.reverse)
}

object SynchronizerShiftReg {
  def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
    if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)

  def apply [T <: Data](in: T, sync: Int): T =
    apply (in, sync, None)

  def apply [T <: Data](in: T): T =
    apply (in, 3, None)
}

class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
  override def desiredName = s"ClockCrossingReg_w${w}"

  val io = IO(new Bundle{
    val d = Input(UInt(w.W))
    val q = Output(UInt(w.W))
    val en = Input(Bool())
  })

  val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
  io.q := cdc_reg
}

object ClockCrossingReg {
  def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
    val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
    name.foreach{ cdc_reg.suggestName(_) }
    cdc_reg.io.d := in.asUInt
    cdc_reg.io.en := en
    cdc_reg.io.q.asTypeOf(in)
  }
}
module AsyncResetSynchronizerShiftReg_w4_d3_i0_39( // @[SynchronizerReg.scala:80:7]
  input        clock, // @[SynchronizerReg.scala:80:7]
  input        reset, // @[SynchronizerReg.scala:80:7]
  input  [3:0] io_d, // @[ShiftReg.scala:36:14]
  output [3:0] io_q // @[ShiftReg.scala:36:14]
);

  wire [3:0] io_d_0 = io_d; // @[SynchronizerReg.scala:80:7]
  wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
  wire _output_T_2 = reset; // @[SynchronizerReg.scala:86:21]
  wire _output_T_4 = reset; // @[SynchronizerReg.scala:86:21]
  wire _output_T_6 = reset; // @[SynchronizerReg.scala:86:21]
  wire [3:0] _io_q_T; // @[SynchronizerReg.scala:90:14]
  wire [3:0] io_q_0; // @[SynchronizerReg.scala:80:7]
  wire _output_T_1 = io_d_0[0]; // @[SynchronizerReg.scala:80:7, :87:41]
  wire output_0; // @[ShiftReg.scala:48:24]
  wire _output_T_3 = io_d_0[1]; // @[SynchronizerReg.scala:80:7, :87:41]
  wire output_1; // @[ShiftReg.scala:48:24]
  wire _output_T_5 = io_d_0[2]; // @[SynchronizerReg.scala:80:7, :87:41]
  wire output_2; // @[ShiftReg.scala:48:24]
  wire _output_T_7 = io_d_0[3]; // @[SynchronizerReg.scala:80:7, :87:41]
  wire output_3; // @[ShiftReg.scala:48:24]
  wire [1:0] io_q_lo = {output_1, output_0}; // @[SynchronizerReg.scala:90:14]
  wire [1:0] io_q_hi = {output_3, output_2}; // @[SynchronizerReg.scala:90:14]
  assign _io_q_T = {io_q_hi, io_q_lo}; // @[SynchronizerReg.scala:90:14]
  assign io_q_0 = _io_q_T; // @[SynchronizerReg.scala:80:7, :90:14]
  AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_350 output_chain ( // @[ShiftReg.scala:45:23]
    .clock (clock),
    .reset (_output_T), // @[SynchronizerReg.scala:86:21]
    .io_d  (_output_T_1), // @[SynchronizerReg.scala:87:41]
    .io_q  (output_0)
  ); // @[ShiftReg.scala:45:23]
  AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_351 output_chain_1 ( // @[ShiftReg.scala:45:23]
    .clock (clock),
    .reset (_output_T_2), // @[SynchronizerReg.scala:86:21]
    .io_d  (_output_T_3), // @[SynchronizerReg.scala:87:41]
    .io_q  (output_1)
  ); // @[ShiftReg.scala:45:23]
  AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_352 output_chain_2 ( // @[ShiftReg.scala:45:23]
    .clock (clock),
    .reset (_output_T_4), // @[SynchronizerReg.scala:86:21]
    .io_d  (_output_T_5), // @[SynchronizerReg.scala:87:41]
    .io_q  (output_2)
  ); // @[ShiftReg.scala:45:23]
  AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_353 output_chain_3 ( // @[ShiftReg.scala:45:23]
    .clock (clock),
    .reset (_output_T_6), // @[SynchronizerReg.scala:86:21]
    .io_d  (_output_T_7), // @[SynchronizerReg.scala:87:41]
    .io_q  (output_3)
  ); // @[ShiftReg.scala:45:23]
  assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule
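Editor's note: the module above is the elaborated AsyncResetSynchronizerShiftReg for w = 4, sync = 3, init = 0; it slices the 4-bit input, runs each bit through its own asynchronously reset 3-deep primitive shift register, and concatenates the per-bit outputs. A rough usage sketch of the companion object from SynchronizerReg.scala follows; the enclosing module and signal names are invented for illustration and are not part of the sources above.

// Hedged usage sketch: bringing a 4-bit signal from another clock domain through
// the AsyncResetSynchronizerShiftReg helper defined in SynchronizerReg.scala.
import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

class RxSync extends Module {
  val io = IO(new Bundle {
    val async_bits = Input(UInt(4.W))  // driven from a different clock domain
    val sync_bits  = Output(UInt(4.W))
  })
  // Three synchronizer stages per bit, reset value 0, which elaborates to a
  // module like AsyncResetSynchronizerShiftReg_w4_d3_i0 above.
  io.sync_bits := AsyncResetSynchronizerShiftReg(io.async_bits, sync = 3, init = 0, name = Some("rx_sync"))
}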
Generate the Verilog code corresponding to the following Chisel files.

File Monitor.scala:

// See LICENSE.SiFive for license details.

package freechips.rocketchip.tilelink

import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg

case class TLMonitorArgs(edge: TLEdge)

abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
  val io = IO(new Bundle {
    val in = Input(new TLBundle(args.edge.bundle))
  })

  def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
  legalize(io.in, args.edge, reset)
}

object TLMonitor {
  def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
    if (enable) {
      EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
    } else { node }
  }
}

class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
  require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))

  val cover_prop_class = PropertyClass.Default

  //Like assert but can flip to being an assumption for formal verification
  def monAssert(cond: Bool, message: String): Unit =
    if (monitorDir == MonitorDirection.Monitor) {
      assert(cond, message)
    } else {
      Property(monitorDir, cond, message, PropertyClass.Default)
    }

  def assume(cond: Bool, message: String): Unit =
    if (monitorDir == MonitorDirection.Monitor) {
      assert(cond, message)
    } else {
      Property(monitorDir.flip, cond, message, PropertyClass.Default)
    }

  def extra = {
    args.edge.sourceInfo match {
      case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
      case _ => ""
    }
  }

  def visible(address: UInt, source: UInt, edge: TLEdge) =
    edge.client.clients.map { c =>
      !c.sourceId.contains(source) ||
      c.visibility.map(_.contains(address)).reduce(_ || _)
    }.reduce(_ && _)

  def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
    //switch this flag to turn on diplomacy in error messages
    def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"

    monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)

    // Reuse these subexpressions to save some firrtl lines
    val source_ok = edge.client.contains(bundle.source)
    val is_aligned = edge.isAligned(bundle.address, bundle.size)
    val mask = edge.full_mask(bundle)

    monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")

    //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
    //TODO: check for acquireT?
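    // Editor's note (added commentary, not part of the upstream Monitor.scala): each
    // `when` block below checks one A-channel opcode against the diplomatic
    // capabilities of the edge plus the basic TileLink rules (legal source ID,
    // address alignment, param encoding, mask shape, corrupt bit). Every block
    // follows roughly the same pattern:
    //
    //   when (bundle.opcode === TLMessages.SomeOpcode) {
    //     monAssert (<edge capability check>, "... unexpected using diplomatic parameters" + extra)
    //     monAssert (source_ok, ...)   // plus alignment, param, mask, corrupt checks per opcode
    //   }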
    when (bundle.opcode === TLMessages.AcquireBlock) {
      monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
      monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
      monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
      monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
      monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
      monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
      monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
      monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
    }

    when (bundle.opcode === TLMessages.AcquirePerm) {
      monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
      monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
      monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
      monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
      monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
      monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
      monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
      monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
      monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
    }

    when (bundle.opcode === TLMessages.Get) {
      monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
      monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
      monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
      monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
      monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
      monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
    }

    when (bundle.opcode === TLMessages.PutFullData) {
      monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
      monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
      monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
      monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
    }

    when (bundle.opcode === TLMessages.PutPartialData) {
      monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
      monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
      monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
      monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
    }

    when (bundle.opcode === TLMessages.ArithmeticData) {
      monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
      monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
      monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
      monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
      monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
    }

    when (bundle.opcode === TLMessages.LogicalData) {
      monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
      monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
      monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
      monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
      monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
    }

    when (bundle.opcode === TLMessages.Hint) {
      monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
      monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
      monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
      monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
      monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
      monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
    }
  }

  def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
    monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)

    monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")

    // Reuse these subexpressions to save some firrtl lines
    val address_ok = edge.manager.containsSafe(edge.address(bundle))
    val is_aligned = edge.isAligned(bundle.address, bundle.size)
    val mask = edge.full_mask(bundle)
    val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source

    when (bundle.opcode === TLMessages.Probe) {
      assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
      assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
      assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
      assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
      assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
      assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
      assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
    }

    when (bundle.opcode === TLMessages.Get) {
      monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
      monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
      monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
      monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
      monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
      monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
    }

    when (bundle.opcode === TLMessages.PutFullData) {
      monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
      monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
      monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
      monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
      monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
    }

    when (bundle.opcode === TLMessages.PutPartialData) {
      monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
      monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
      monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
      monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
      monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
    }

    when (bundle.opcode === TLMessages.ArithmeticData) {
      monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
      monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
      monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
      monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
      monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" +
        extra)
      monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
    }

    when (bundle.opcode === TLMessages.LogicalData) {
      monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
      monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
      monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
      monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
      monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
      monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
    }

    when (bundle.opcode === TLMessages.Hint) {
      monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
      monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
      monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
      monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
      monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
      monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
    }
  }

  def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
    monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)

    val source_ok = edge.client.contains(bundle.source)
    val is_aligned = edge.isAligned(bundle.address, bundle.size)
    val address_ok = edge.manager.containsSafe(edge.address(bundle))

    monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")

    when (bundle.opcode === TLMessages.ProbeAck) {
      monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
      monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
      monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
      monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
      monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
      monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
    }

    when (bundle.opcode === TLMessages.ProbeAckData) {
      monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
      monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
      monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
      monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
      monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
    }

    when (bundle.opcode === TLMessages.Release) {
      monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
      monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
      monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
      monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
      monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
      monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
      monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
    }

    when (bundle.opcode === TLMessages.ReleaseData) {
      monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
      monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
      monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
      monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
      monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
      monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
    }

    when (bundle.opcode === TLMessages.AccessAck) {
      monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
      monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
      monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
      monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
    }

    when (bundle.opcode === TLMessages.AccessAckData) {
      monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
      monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
      monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
    }

    when (bundle.opcode === TLMessages.HintAck) {
      monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
      monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
      monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
      monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
    }
  }

  def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
    assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)

    val source_ok = edge.client.contains(bundle.source)
    val sink_ok = bundle.sink < edge.manager.endSinkId.U
    val deny_put_ok = edge.manager.mayDenyPut.B
    val deny_get_ok = edge.manager.mayDenyGet.B

    when (bundle.opcode === TLMessages.ReleaseAck) {
      assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
      assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
      assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra)
      assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
      assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
    }

    when (bundle.opcode === TLMessages.Grant) {
      assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
      assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
      assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
      assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
      assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
      assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
      assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
    }

    when (bundle.opcode === TLMessages.GrantData) {
      assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
      assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
      assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
      assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
      assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
      assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
      assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
    }

    when (bundle.opcode === TLMessages.AccessAck) {
      assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
      // size is ignored
      assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
      assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
      assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
    }

    when (bundle.opcode === TLMessages.AccessAckData) {
      assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
      // size is ignored
      assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
      assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
      assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
    }

    when (bundle.opcode === TLMessages.HintAck) {
      assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
      // size is ignored
      assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
      assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
      assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
    }
  }

  def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
    val sink_ok = bundle.sink < edge.manager.endSinkId.U
    monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra)
  }

  def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
    when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
    when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
    if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
      when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
      when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
      when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
    } else {
      monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
      monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
      monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
    }
  }

  def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
    val a_first = edge.first(a.bits, a.fire)
    val opcode = Reg(UInt())
    val param = Reg(UInt())
    val size = Reg(UInt())
    val source = Reg(UInt())
    val address = Reg(UInt())
    when (a.valid && !a_first) {
      monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
      monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
      monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
      monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
      monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra)
    }
    when (a.fire && a_first) {
      opcode := a.bits.opcode
      param := a.bits.param
      size := a.bits.size
      source := a.bits.source
      address := a.bits.address
    }
  }

  def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
    val b_first = edge.first(b.bits, b.fire)
    val opcode = Reg(UInt())
    val param = Reg(UInt())
    val size = Reg(UInt())
    val source = Reg(UInt())
    val address = Reg(UInt())
    when (b.valid && !b_first) {
      monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
      monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
      monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
      monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
      monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra)
    }
    when (b.fire && b_first) {
      opcode := b.bits.opcode
      param := b.bits.param
      size := b.bits.size
      source := b.bits.source
      address := b.bits.address
    }
  }

  def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
    // Symbolic variable
    val sym_source = Wire(UInt(edge.client.endSourceId.W))
    // TODO: Connect sym_source to a fixed value for simulation and to a
    // free wire in formal
    sym_source := 0.U

    // Type casting Int to UInt
    val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
    maxSourceId := edge.client.endSourceId.U

    // Delayed verison of sym_source
    val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
    sym_source_d := sym_source

    // These will be constraints for FV setup
    Property(
      MonitorDirection.Monitor,
      (sym_source === sym_source_d),
      "sym_source should remain stable",
      PropertyClass.Default)
    Property(
      MonitorDirection.Monitor,
      (sym_source <= maxSourceId),
      "sym_source should take legal value",
      PropertyClass.Default)

    val my_resp_pend = RegInit(false.B)
    val my_opcode = Reg(UInt())
    val my_size = Reg(UInt())

    val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
    val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)

    val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
    val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)

    val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
    val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)

    when (my_set_resp_pend) {
      my_resp_pend := true.B
    } .elsewhen (my_clr_resp_pend) {
      my_resp_pend := false.B
    }

    when (my_a_first_beat) {
      my_opcode := bundle.a.bits.opcode
      my_size := bundle.a.bits.size
    }

    val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
    val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)

    val my_resp_opcode_legal = Wire(Bool())
    when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
      (my_resp_opcode === TLMessages.LogicalData)) {
      my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
    } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
      my_resp_opcode_legal :=
        (bundle.d.bits.opcode === TLMessages.AccessAck)
    } .otherwise {
      my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
    }

    monAssert (IfThen(my_resp_pend, !my_a_first_beat),
      "Request message should not be sent with a source ID, for which a response message" +
      "is already pending (not received until current cycle) for a prior request message" +
      "with the same source ID" + extra)

    assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
      "Response message should be accepted with a source ID only if a request message with the" +
      "same source ID has been accepted or is being accepted in the current cycle" + extra)
    assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
      "Response message should be sent with a source ID only if a request message with the" +
      "same source ID has been accepted or is being sent in the current cycle" + extra)
    assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
      "If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
      "message" + extra)
    assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
      "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
      "request message" + extra)
  }

  def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
    val c_first = edge.first(c.bits, c.fire)
    val opcode = Reg(UInt())
    val param = Reg(UInt())
    val size = Reg(UInt())
    val source = Reg(UInt())
    val address = Reg(UInt())
    when (c.valid && !c_first) {
      monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
      monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
      monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
      monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
      monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra)
    }
    when (c.fire && c_first) {
      opcode := c.bits.opcode
      param := c.bits.param
      size := c.bits.size
      source := c.bits.source
      address := c.bits.address
    }
  }

  def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
    val d_first = edge.first(d.bits, d.fire)
    val opcode = Reg(UInt())
    val param = Reg(UInt())
    val size = Reg(UInt())
    val source = Reg(UInt())
    val sink = Reg(UInt())
    val denied = Reg(Bool())
    when (d.valid && !d_first) {
      assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
      assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
      assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
      assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
      assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra)
      assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra)
    }
    when (d.fire && d_first) {
      opcode := d.bits.opcode
      param := d.bits.param
      size := d.bits.size
      source := d.bits.source
      sink := d.bits.sink
      denied := d.bits.denied
    }
  }

  def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
    legalizeMultibeatA(bundle.a, edge)
    legalizeMultibeatD(bundle.d, edge)
    if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
      legalizeMultibeatB(bundle.b, edge)
      legalizeMultibeatC(bundle.c, edge)
    }
  }

  //This is left in for almond which doesn't adhere to the tilelink protocol
  @deprecated("Use legalizeADSource instead if possible","")
  def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
    val inflight = RegInit(0.U(edge.client.endSourceId.W))

    val a_first = edge.first(bundle.a.bits, bundle.a.fire)
    val d_first = edge.first(bundle.d.bits, bundle.d.fire)

    val a_set = WireInit(0.U(edge.client.endSourceId.W))
    when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
      a_set := UIntToOH(bundle.a.bits.source)
      assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
    }

    val d_clr = WireInit(0.U(edge.client.endSourceId.W))
    val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
    when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
      d_clr := UIntToOH(bundle.d.bits.source)
      assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
    }

    if (edge.manager.minLatency > 0) {
      assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
    }

    inflight := (inflight | a_set) & ~d_clr

    val watchdog = RegInit(0.U(32.W))
    val limit = PlusArg("tilelink_timeout",
      docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
    assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)

    watchdog := watchdog + 1.U
    when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
  }

  def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
    val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
    val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
    val log_a_size_bus_size = log2Ceil(a_size_bus_size)
    def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits

    val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
    inflight.suggestName("inflight")
    val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
    inflight_opcodes.suggestName("inflight_opcodes")
    val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
    inflight_sizes.suggestName("inflight_sizes")

    val a_first = edge.first(bundle.a.bits, bundle.a.fire)
    a_first.suggestName("a_first")
    val d_first = edge.first(bundle.d.bits, bundle.d.fire)
    d_first.suggestName("d_first")

    val a_set = WireInit(0.U(edge.client.endSourceId.W))
    val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
    a_set.suggestName("a_set")
    a_set_wo_ready.suggestName("a_set_wo_ready")
    val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
    a_opcodes_set.suggestName("a_opcodes_set")
    val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
    a_sizes_set.suggestName("a_sizes_set")

    val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
    a_opcode_lookup.suggestName("a_opcode_lookup")
    a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
    val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
    a_size_lookup.suggestName("a_size_lookup")
    a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U

    val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData,
      TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
    val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData,
      TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))

    val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
    a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
    val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
    a_sizes_set_interm.suggestName("a_sizes_set_interm")

    when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
      a_set_wo_ready := UIntToOH(bundle.a.bits.source)
    }

    when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
      a_set := UIntToOH(bundle.a.bits.source)
      a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
      a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
      a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
      a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
      monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
    }

    val d_clr = WireInit(0.U(edge.client.endSourceId.W))
    val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
    d_clr.suggestName("d_clr")
    d_clr_wo_ready.suggestName("d_clr_wo_ready")
    val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
    d_opcodes_clr.suggestName("d_opcodes_clr")
    val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
    d_sizes_clr.suggestName("d_sizes_clr")

    val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
    when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
      d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
    }

    when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
      d_clr := UIntToOH(bundle.d.bits.source)
      d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
      d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
    }

    when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
      val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
      assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
      when (same_cycle_resp) {
        assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
               (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
        assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
      } .otherwise {
        assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
               (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
        assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
      }
    }

    when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
      assume((!bundle.d.ready) || bundle.a.ready, "ready check")
    }

    if (edge.manager.minLatency > 0) {
      assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
    }

    inflight := (inflight | a_set) & ~d_clr
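    // Editor's note (added commentary, not part of the upstream Monitor.scala): the
    // bookkeeping above records, per source ID, the opcode and size of the outstanding
    // 'A' request. Each field is stored shifted left by one with the LSB set, so an
    // all-zero field means "no entry". As a hypothetical example with
    // a_opcode_bus_size = 4: a Get (opcode 4) on source 2 stores (4 << 1) | 1 = 9 into
    // bits [11:8] of inflight_opcodes, and a_opcode_lookup shifts that field back down
    // and drops the valid bit before the 'D' response is checked against responseMap.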
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
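// Illustrative note: for a non-power-of-2 modulus, partition() below compares the random
// value against evenly spaced thresholds ((i + 1) << width) / mod, and the priority encoder
// picks the first slice the value falls under, giving an approximately uniform index in
// [0, mod).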
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
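// Illustrative note: this foldLeft builds a log-depth barrel rotate. Stage i either keeps
// the current ordering or applies a fixed rotate by 2^i elements, selected by bit i of the
// rotate amount, so any amount in 0 .. size-1 is reached in log2(size) Mux stages instead
// of one size-way selector.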
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Bundles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import freechips.rocketchip.util._ import scala.collection.immutable.ListMap import chisel3.util.Decoupled import chisel3.util.DecoupledIO import chisel3.reflect.DataMirror abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle // common combos in lazy policy: // Put + Acquire // Release + AccessAck object TLMessages { // A B C D E def PutFullData = 0.U // . . => AccessAck def PutPartialData = 1.U // . . => AccessAck def ArithmeticData = 2.U // . . => AccessAckData def LogicalData = 3.U // . . => AccessAckData def Get = 4.U // . . => AccessAckData def Hint = 5.U // . . => HintAck def AcquireBlock = 6.U // . => Grant[Data] def AcquirePerm = 7.U // . => Grant[Data] def Probe = 6.U // . => ProbeAck[Data] def AccessAck = 0.U // . . def AccessAckData = 1.U // . . def HintAck = 2.U // . . def ProbeAck = 4.U // . def ProbeAckData = 5.U // . def Release = 6.U // . => ReleaseAck def ReleaseData = 7.U // . => ReleaseAck def Grant = 4.U // . => GrantAck def GrantData = 5.U // . => GrantAck def ReleaseAck = 6.U // . def GrantAck = 0.U // . def isA(x: UInt) = x <= AcquirePerm def isB(x: UInt) = x <= Probe def isC(x: UInt) = x <= ReleaseData def isD(x: UInt) = x <= ReleaseAck def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant) def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck) def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("AcquireBlock",TLPermissions.PermMsgGrow), ("AcquirePerm",TLPermissions.PermMsgGrow)) def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("Probe",TLPermissions.PermMsgCap)) def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("ProbeAck",TLPermissions.PermMsgReport), ("ProbeAckData",TLPermissions.PermMsgReport), ("Release",TLPermissions.PermMsgReport), ("ReleaseData",TLPermissions.PermMsgReport)) def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("Grant",TLPermissions.PermMsgCap), ("GrantData",TLPermissions.PermMsgCap), ("ReleaseAck",TLPermissions.PermMsgReserved)) } /** * The three primary TileLink permissions are: * (T)runk: the agent is (or is on inwards path to) the global point of 
serialization. * (B)ranch: the agent is on an outwards path to * (N)one: * These permissions are permuted by transfer operations in various ways. * Operations can cap permissions, request for them to be grown or shrunk, * or for a report on their current status. */ object TLPermissions { val aWidth = 2 val bdWidth = 2 val cWidth = 3 // Cap types (Grant = new permissions, Probe = permisions <= target) def toT = 0.U(bdWidth.W) def toB = 1.U(bdWidth.W) def toN = 2.U(bdWidth.W) def isCap(x: UInt) = x <= toN // Grow types (Acquire = permissions >= target) def NtoB = 0.U(aWidth.W) def NtoT = 1.U(aWidth.W) def BtoT = 2.U(aWidth.W) def isGrow(x: UInt) = x <= BtoT // Shrink types (ProbeAck, Release) def TtoB = 0.U(cWidth.W) def TtoN = 1.U(cWidth.W) def BtoN = 2.U(cWidth.W) def isShrink(x: UInt) = x <= BtoN // Report types (ProbeAck, Release) def TtoT = 3.U(cWidth.W) def BtoB = 4.U(cWidth.W) def NtoN = 5.U(cWidth.W) def isReport(x: UInt) = x <= NtoN def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT") def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN") def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TotT", "Report BtoB", "Report NtoN") def PermMsgReserved:Seq[String] = Seq("Reserved") } object TLAtomics { val width = 3 // Arithmetic types def MIN = 0.U(width.W) def MAX = 1.U(width.W) def MINU = 2.U(width.W) def MAXU = 3.U(width.W) def ADD = 4.U(width.W) def isArithmetic(x: UInt) = x <= ADD // Logical types def XOR = 0.U(width.W) def OR = 1.U(width.W) def AND = 2.U(width.W) def SWAP = 3.U(width.W) def isLogical(x: UInt) = x <= SWAP def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD") def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP") } object TLHints { val width = 1 def PREFETCH_READ = 0.U(width.W) def PREFETCH_WRITE = 1.U(width.W) def isHints(x: UInt) = x <= PREFETCH_WRITE def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite") } sealed trait TLChannel extends TLBundleBase { val channelName: String } sealed trait TLDataChannel extends TLChannel sealed trait TLAddrChannel extends TLDataChannel final class TLBundleA(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleA_${params.shortName}" val channelName = "'A' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleB(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleB_${params.shortName}" val channelName = "'B' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val address = UInt(params.addressBits.W) // from // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleC(params: TLBundleParameters) extends TLBundleBase(params) with 
TLAddrChannel { override def typeName = s"TLBundleC_${params.shortName}" val channelName = "'C' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.cWidth.W) // shrink or report perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleD(params: TLBundleParameters) extends TLBundleBase(params) with TLDataChannel { override def typeName = s"TLBundleD_${params.shortName}" val channelName = "'D' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val sink = UInt(params.sinkBits.W) // from val denied = Bool() // implies corrupt iff *Data val user = BundleMap(params.responseFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleE(params: TLBundleParameters) extends TLBundleBase(params) with TLChannel { override def typeName = s"TLBundleE_${params.shortName}" val channelName = "'E' channel" val sink = UInt(params.sinkBits.W) // to } class TLBundle(val params: TLBundleParameters) extends Record { // Emulate a Bundle with elements abcde or ad depending on params.hasBCE private val optA = Some (Decoupled(new TLBundleA(params))) private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params)))) private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params))) private val optD = Some (Flipped(Decoupled(new TLBundleD(params)))) private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params))) def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params))))) def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params))))) def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params))))) def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params))))) def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params))))) val elements = if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a) else ListMap("d" -> d, "a" -> a) def tieoff(): Unit = { DataMirror.specifiedDirectionOf(a.ready) match { case SpecifiedDirection.Input => a.ready := false.B c.ready := false.B e.ready := false.B b.valid := false.B d.valid := false.B case SpecifiedDirection.Output => a.valid := false.B c.valid := false.B e.valid := false.B b.ready := false.B d.ready := false.B case _ => } } } object TLBundle { def apply(params: TLBundleParameters) = new TLBundle(params) } class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params) { val a = new AsyncBundle(new TLBundleA(params.base), params.async) val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async)) val c = new AsyncBundle(new TLBundleC(params.base), params.async) val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async)) val e = new AsyncBundle(new 
TLBundleE(params.base), params.async) } class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = RationalIO(new TLBundleA(params)) val b = Flipped(RationalIO(new TLBundleB(params))) val c = RationalIO(new TLBundleC(params)) val d = Flipped(RationalIO(new TLBundleD(params))) val e = RationalIO(new TLBundleE(params)) } class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = CreditedIO(new TLBundleA(params)) val b = Flipped(CreditedIO(new TLBundleB(params))) val c = CreditedIO(new TLBundleC(params)) val d = Flipped(CreditedIO(new TLBundleD(params))) val e = CreditedIO(new TLBundleE(params)) } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // An potentially empty inclusive range of 2-powers 
[min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max, s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask are the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f decribes a device managing 0x1000-0x100f, 0x1100-0x110f, ... 
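// The membership test is pure bit arithmetic: contains(x) = ((x ^ base) & ~mask) == 0, i.e.
// x must agree with base on every bit position the mask does not cover. Illustrative check
// for base=0x1000, mask=0xf0f: 0x1105 differs from the base only in masked bits (0, 2 and 8),
// so it is contained; 0x1010 differs in bit 4, which the mask does not cover, so it is not.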
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
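// The OR of every base gives the candidate bit positions on which two sets might differ by
// exactly one bit; the two-argument unify above then pair-merges any sets that become
// identical once such a bit is ignored, widening their mask by that bit instead.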
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
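// Illustrative note: staticHasData exists so that hasData() can constant-fold. When the port
// parameters imply that every possible message on a channel carries data (or that none does),
// this returns Some(true)/Some(false) and hasData() uses that constant instead of decoding
// the opcode at runtime; only genuinely mixed edges keep the opcode-based check.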
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => 
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
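// Illustrative note on firstlastHelper above: the counter starts at 0, reloads with beats1
// (total beats - 1) on the first accepted beat, and counts down, so first is "counter == 0",
// last is "counter == 1 or the message is single-beat", and count reconstructs the current
// beat index from beats1 & ~counter1.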
fire)
    (r._1, r._2, r._3, r._4)
  }
  def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
  def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)

  def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
    val r = firstlastHelper(bits, fire)
    (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
  }
  def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
  def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)

  // Does the request need T permissions to be executed?
  def needT(a: TLBundleA): Bool = {
    val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
      TLPermissions.NtoB -> false.B,
      TLPermissions.NtoT -> true.B,
      TLPermissions.BtoT -> true.B))
    MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
      TLMessages.PutFullData -> true.B,
      TLMessages.PutPartialData -> true.B,
      TLMessages.ArithmeticData -> true.B,
      TLMessages.LogicalData -> true.B,
      TLMessages.Get -> false.B,
      TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
        TLHints.PREFETCH_READ -> false.B,
        TLHints.PREFETCH_WRITE -> true.B)),
      TLMessages.AcquireBlock -> acq_needT,
      TLMessages.AcquirePerm -> acq_needT))
  }

  // This is a very expensive circuit; use only if you really mean it!
  def inFlight(x: TLBundle): (UInt, UInt) = {
    val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
    val bce = manager.anySupportAcquireB && client.anySupportProbe

    val (a_first, a_last, _) = firstlast(x.a)
    val (b_first, b_last, _) = firstlast(x.b)
    val (c_first, c_last, _) = firstlast(x.c)
    val (d_first, d_last, _) = firstlast(x.d)
    val (e_first, e_last, _) = firstlast(x.e)

    val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
    val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
    val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
    val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
    val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))

    val a_inc = x.a.fire && a_first && a_request
    val b_inc = x.b.fire && b_first && b_request
    val c_inc = x.c.fire && c_first && c_request
    val d_inc = x.d.fire && d_first && d_request
    val e_inc = x.e.fire && e_first && e_request
    val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))

    val a_dec = x.a.fire && a_last && a_response
    val b_dec = x.b.fire && b_last && b_response
    val c_dec = x.c.fire && c_last && c_response
    val d_dec = x.d.fire && d_last && d_response
    val e_dec = x.e.fire && e_last && e_response
    val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))

    val next_flight = flight + PopCount(inc) - PopCount(dec)
    flight := next_flight

    (flight, next_flight)
  }

  def prettySourceMapping(context: String): String = {
    s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
  }
}

class TLEdgeOut(
  client: TLClientPortParameters,
  manager: TLManagerPortParameters,
  params: Parameters,
  sourceInfo: SourceInfo)
  extends TLEdge(client, manager, params, sourceInfo)
{
  // Transfers
  def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
    require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsAcquireBFast(toAddress, lgSize)
    val a = Wire(new TLBundleA(bundle))
    a.opcode := TLMessages.AcquireBlock
    a.param := growPermissions
    a.size := lgSize
    a.source := fromSource
    a.address := toAddress
    a.user := DontCare
    a.echo := DontCare
    a.mask := mask(toAddress, lgSize)
    a.data := DontCare
    a.corrupt := false.B
    (legal, a)
  }

  def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
    require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsAcquireBFast(toAddress, lgSize)
    val a = Wire(new TLBundleA(bundle))
    a.opcode := TLMessages.AcquirePerm
    a.param := growPermissions
    a.size := lgSize
    a.source := fromSource
    a.address := toAddress
    a.user := DontCare
    a.echo := DontCare
    a.mask := mask(toAddress, lgSize)
    a.data := DontCare
    a.corrupt := false.B
    (legal, a)
  }

  def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
    require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsAcquireBFast(toAddress, lgSize)
    val c = Wire(new TLBundleC(bundle))
    c.opcode := TLMessages.Release
    c.param := shrinkPermissions
    c.size := lgSize
    c.source := fromSource
    c.address := toAddress
    c.user := DontCare
    c.echo := DontCare
    c.data := DontCare
    c.corrupt := false.B
    (legal, c)
  }

  def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
    require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsAcquireBFast(toAddress, lgSize)
    val c = Wire(new TLBundleC(bundle))
    c.opcode := TLMessages.ReleaseData
    c.param := shrinkPermissions
    c.size := lgSize
    c.source := fromSource
    c.address := toAddress
    c.user := DontCare
    c.echo := DontCare
    c.data := data
    c.corrupt := corrupt
    (legal, c)
  }

  def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)

  def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions)
  def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
    val c = Wire(new TLBundleC(bundle))
    c.opcode := TLMessages.ProbeAck
    c.param := reportPermissions
    c.size := lgSize
    c.source := fromSource
    c.address := toAddress
    c.user := DontCare
    c.echo := DontCare
    c.data := DontCare
    c.corrupt := false.B
    c
  }

  def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data)
  def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
    val c = Wire(new TLBundleC(bundle))
    c.opcode := TLMessages.ProbeAckData
    c.param := reportPermissions
    c.size := lgSize
    c.source := fromSource
    c.address := toAddress
    c.user := DontCare
    c.echo := DontCare
    c.data := data
    c.corrupt := corrupt
    c
  }

  def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)

  def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
  def GrantAck(toSink: UInt): TLBundleE = {
    val e = Wire(new TLBundleE(bundle))
    e.sink := toSink
    e
  }

  // Accesses
  def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
    require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsGetFast(toAddress, lgSize)
    val a = Wire(new TLBundleA(bundle))
    a.opcode := TLMessages.Get
    a.param := 0.U
    a.size := lgSize
    a.source := fromSource
    a.address := toAddress
    a.user := DontCare
    a.echo := DontCare
    a.mask := mask(toAddress, lgSize)
    a.data := DontCare
    a.corrupt := false.B
    (legal, a)
  }

  def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B)
  def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
    require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsPutFullFast(toAddress, lgSize)
    val a = Wire(new TLBundleA(bundle))
    a.opcode := TLMessages.PutFullData
    a.param := 0.U
    a.size := lgSize
    a.source := fromSource
    a.address := toAddress
    a.user := DontCare
    a.echo := DontCare
    a.mask := mask(toAddress, lgSize)
    a.data := data
    a.corrupt := corrupt
    (legal, a)
  }

  def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B)
  def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
    require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsPutPartialFast(toAddress, lgSize)
    val a = Wire(new TLBundleA(bundle))
    a.opcode := TLMessages.PutPartialData
    a.param := 0.U
    a.size := lgSize
    a.source := fromSource
    a.address := toAddress
    a.user := DontCare
    a.echo := DontCare
    a.mask := mask
    a.data := data
    a.corrupt := corrupt
    (legal, a)
  }

  def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
    require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsArithmeticFast(toAddress, lgSize)
    val a = Wire(new TLBundleA(bundle))
    a.opcode := TLMessages.ArithmeticData
    a.param := atomic
    a.size := lgSize
    a.source := fromSource
    a.address := toAddress
    a.user := DontCare
    a.echo := DontCare
    a.mask := mask(toAddress, lgSize)
    a.data := data
    a.corrupt := corrupt
    (legal, a)
  }

  def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
    require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsLogicalFast(toAddress, lgSize)
    val a = Wire(new TLBundleA(bundle))
    a.opcode := TLMessages.LogicalData
    a.param := atomic
    a.size := lgSize
    a.source := fromSource
    a.address := toAddress
    a.user := DontCare
    a.echo := DontCare
    a.mask := mask(toAddress, lgSize)
    a.data := data
    a.corrupt := corrupt
    (legal, a)
  }

  def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
    require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
    val legal = manager.supportsHintFast(toAddress, lgSize)
    val a = Wire(new TLBundleA(bundle))
    a.opcode := TLMessages.Hint
    a.param := param
    a.size := lgSize
    a.source := fromSource
    a.address := toAddress
    a.user := DontCare
    a.echo := DontCare
    a.mask := mask(toAddress, lgSize)
    a.data := DontCare
    a.corrupt := false.B
    (legal, a)
  }

  def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
  def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
    val c = Wire(new TLBundleC(bundle))
    c.opcode := TLMessages.AccessAck
    c.param := 0.U
    c.size := lgSize
    c.source := fromSource
    c.address := toAddress
    c.user := DontCare
    c.echo := DontCare
    c.data := DontCare
    c.corrupt := false.B
    c
  }

  def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
  def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
  def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
  def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
    val c = Wire(new TLBundleC(bundle))
    c.opcode := TLMessages.AccessAckData
    c.param := 0.U
    c.size := lgSize
    c.source := fromSource
    c.address := toAddress
    c.user := DontCare
    c.echo := DontCare
    c.data := data
    c.corrupt := corrupt
    c
  }

  def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
  def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
    val c = Wire(new TLBundleC(bundle))
    c.opcode := TLMessages.HintAck
    c.param := 0.U
    c.size := lgSize
    c.source := fromSource
    c.address := toAddress
    c.user := DontCare
    c.echo := DontCare
    c.data := DontCare
    c.corrupt := false.B
    c
  }
}

class TLEdgeIn(
  client: TLClientPortParameters,
  manager: TLManagerPortParameters,
  params: Parameters,
  sourceInfo: SourceInfo)
  extends TLEdge(client, manager, params, sourceInfo)
{
  private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
    val todo = x.filter(!_.isEmpty)
    val heads = todo.map(_.head)
    val tails = todo.map(_.tail)
    if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
  }

  // Transfers
  def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
    require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
    val legal = client.supportsProbe(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode := TLMessages.Probe
    b.param := capPermissions
    b.size := lgSize
    b.source := toSource
    b.address := fromAddress
    b.mask := mask(fromAddress, lgSize)
    b.data := DontCare
    b.corrupt := false.B
    (legal, b)
  }

  def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
  def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
    val d = Wire(new TLBundleD(bundle))
    d.opcode := TLMessages.Grant
    d.param := capPermissions
    d.size := lgSize
    d.source := toSource
    d.sink := fromSink
    d.denied := denied
    d.user := DontCare
    d.echo := DontCare
    d.data := DontCare
    d.corrupt := false.B
    d
  }

  def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
  def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
    val d = Wire(new TLBundleD(bundle))
    d.opcode := TLMessages.GrantData
    d.param := capPermissions
    d.size := lgSize
    d.source := toSource
    d.sink := fromSink
    d.denied := denied
    d.user := DontCare
    d.echo := DontCare
    d.data := data
    d.corrupt := corrupt
    d
  }

  def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
  def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
    val d = Wire(new TLBundleD(bundle))
    d.opcode := TLMessages.ReleaseAck
    d.param := 0.U
    d.size := lgSize
    d.source := toSource
    d.sink := 0.U
    d.denied := denied
    d.user := DontCare
    d.echo := DontCare
    d.data := DontCare
    d.corrupt := false.B
    d
  }

  // Accesses
  def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
    require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
    val legal = client.supportsGet(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode := TLMessages.Get
    b.param := 0.U
    b.size := lgSize
    b.source := toSource
    b.address := fromAddress
    b.mask := mask(fromAddress, lgSize)
    b.data := DontCare
    b.corrupt := false.B
    (legal, b)
  }

  def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B)
  def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
    require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
    val legal = client.supportsPutFull(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode := TLMessages.PutFullData
    b.param := 0.U
    b.size := lgSize
    b.source := toSource
    b.address := fromAddress
    b.mask := mask(fromAddress, lgSize)
    b.data := data
    b.corrupt := corrupt
    (legal, b)
  }

  def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B)
  def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
    require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
    val legal = client.supportsPutPartial(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode := TLMessages.PutPartialData
    b.param := 0.U
    b.size := lgSize
    b.source := toSource
    b.address := fromAddress
    b.mask := mask
    b.data := data
    b.corrupt := corrupt
    (legal, b)
  }

  def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
    require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
    val legal = client.supportsArithmetic(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode := TLMessages.ArithmeticData
    b.param := atomic
    b.size := lgSize
    b.source := toSource
    b.address := fromAddress
    b.mask := mask(fromAddress, lgSize)
    b.data := data
    b.corrupt := corrupt
    (legal, b)
  }

  def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
    require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
    val legal = client.supportsLogical(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode := TLMessages.LogicalData
    b.param := atomic
    b.size := lgSize
    b.source := toSource
    b.address := fromAddress
    b.mask := mask(fromAddress, lgSize)
    b.data := data
    b.corrupt := corrupt
    (legal, b)
  }

  def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
    require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
    val legal = client.supportsHint(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode := TLMessages.Hint
    b.param := param
    b.size := lgSize
    b.source := toSource
    b.address := fromAddress
    b.mask := mask(fromAddress, lgSize)
    b.data := DontCare
    b.corrupt := false.B
    (legal, b)
  }

  def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
  def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
  def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
  def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
    val d = Wire(new TLBundleD(bundle))
    d.opcode := TLMessages.AccessAck
    d.param := 0.U
    d.size := lgSize
    d.source := toSource
    d.sink := 0.U
    d.denied := denied
    d.user := DontCare
    d.echo := DontCare
    d.data := DontCare
    d.corrupt := false.B
    d
  }

  def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
  def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
  def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
  def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
    val d = Wire(new TLBundleD(bundle))
    d.opcode := TLMessages.AccessAckData
    d.param := 0.U
    d.size := lgSize
    d.source := toSource
    d.sink := 0.U
    d.denied := denied
    d.user := DontCare
    d.echo := DontCare
    d.data := data
    d.corrupt := corrupt
    d
  }

  def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
  def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
  def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
  def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
    val d = Wire(new TLBundleD(bundle))
    d.opcode := TLMessages.HintAck
    d.param := 0.U
    d.size := lgSize
    d.source := toSource
    d.sink := 0.U
    d.denied := denied
    d.user := DontCare
    d.echo := DontCare
    d.data := DontCare
    d.corrupt := false.B
    d
  }
}
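The edge classes above are the usual way a client constructs channel beats: each helper either returns a fully populated bundle or a (legal, bits) pair, where legal reflects the manager's fast support check for that address and size. The sketch below is illustrative only and is not part of the files or the generated Verilog in this document; the module name ExampleGetClient, the fixed source/address/size values, and the import paths are assumptions about the surrounding rocket-chip version (newer releases rename TLClientPortParameters to TLMasterPortParameters and move Parameters to org.chipsalliance.cde.config).

// Hypothetical usage sketch (not part of the design above): a client node that
// issues a single-beat 8-byte Get through the TLEdgeOut helpers and sinks the
// AccessAckData reply on channel D.
import chisel3._
import freechips.rocketchip.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._

class ExampleGetClient(address: BigInt)(implicit p: Parameters) extends LazyModule {
  val node = TLClientNode(Seq(TLClientPortParameters(
    Seq(TLClientParameters(name = "example-get-client", sourceId = IdRange(0, 1))))))

  lazy val module = new LazyModuleImp(this) {
    val (tl, edge) = node.out(0)
    // edge.Get returns (legal, bits); legal is manager.supportsGetFast for this address/size.
    val (legal, getBits) = edge.Get(fromSource = 0.U, toAddress = address.U, lgSize = 3.U)
    tl.a.valid := legal      // only present requests the edge reports as legal
    tl.a.bits := getBits
    tl.d.ready := true.B     // always accept the D-channel response
  }
}

In a real design the node still has to be attached to a TileLink interconnect (for example via a crossbar) before elaboration; the point here is only that message fields are built through the edge constructors rather than by assigning TLBundleA fields by hand.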
module TLMonitor_28( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [6:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [11:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [6:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input [63:0] io_in_d_bits_data // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7] wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7] wire [6:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7] wire [11:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7] wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7] wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7] wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7] wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7] wire [6:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7] wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7] wire io_in_d_bits_sink = 1'h0; // @[Monitor.scala:36:7] wire io_in_d_bits_denied = 1'h0; // @[Monitor.scala:36:7] wire io_in_d_bits_corrupt = 1'h0; // @[Monitor.scala:36:7] wire sink_ok = 1'h0; // @[Monitor.scala:309:31] wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35] wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36] wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25] wire c_first_done = 1'h0; // @[Edges.scala:233:22] wire 
_c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47] wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95] wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71] wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] 
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44] wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36] wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51] wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40] wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55] wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88] wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59] wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14] wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27] wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25] wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21] wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // 
@[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] 
_same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61] wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_27 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_29 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_46 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_48 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_52 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_54 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_58 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_60 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_64 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_66 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_70 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_72 = 1'h1; // @[Parameters.scala:57:20] wire c_first = 1'h1; // @[Edges.scala:231:25] wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire c_first_last = 1'h1; // @[Edges.scala:232:33] wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28] wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28] wire [1:0] io_in_d_bits_param = 2'h0; // @[Monitor.scala:36:7] wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // 
@[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [11:0] _c_first_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_first_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _c_first_WIRE_2_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_first_WIRE_3_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _c_set_wo_ready_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_set_wo_ready_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _c_set_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_set_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _c_opcodes_set_interm_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_opcodes_set_interm_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _c_sizes_set_interm_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_sizes_set_interm_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _c_opcodes_set_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_opcodes_set_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _c_sizes_set_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_sizes_set_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _c_probe_ack_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_probe_ack_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _c_probe_ack_WIRE_2_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_probe_ack_WIRE_3_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _same_cycle_resp_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _same_cycle_resp_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _same_cycle_resp_WIRE_2_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _same_cycle_resp_WIRE_3_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _same_cycle_resp_WIRE_4_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _same_cycle_resp_WIRE_5_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [6:0] _c_first_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_first_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_first_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_first_WIRE_3_bits_source = 7'h0; // 
@[Bundles.scala:265:61] wire [6:0] _c_set_wo_ready_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_set_wo_ready_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_opcodes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_opcodes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_sizes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_sizes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_opcodes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_opcodes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_sizes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_sizes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_probe_ack_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_probe_ack_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_probe_ack_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_probe_ack_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _same_cycle_resp_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _same_cycle_resp_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _same_cycle_resp_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _same_cycle_resp_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _same_cycle_resp_WIRE_4_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _same_cycle_resp_WIRE_5_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [1026:0] _c_opcodes_set_T_1 = 1027'h0; // @[Monitor.scala:767:54] wire [1026:0] _c_sizes_set_T_1 = 1027'h0; // @[Monitor.scala:768:52] wire [9:0] 
_c_opcodes_set_T = 10'h0; // @[Monitor.scala:767:79] wire [9:0] _c_sizes_set_T = 10'h0; // @[Monitor.scala:768:77] wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61] wire [3:0] _c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59] wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40] wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40] wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53] wire [3:0] _c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51] wire [127:0] _c_set_wo_ready_T = 128'h1; // @[OneHot.scala:58:35] wire [127:0] _c_set_T = 128'h1; // @[OneHot.scala:58:35] wire [259:0] c_opcodes_set = 260'h0; // @[Monitor.scala:740:34] wire [259:0] c_sizes_set = 260'h0; // @[Monitor.scala:741:34] wire [64:0] c_set = 65'h0; // @[Monitor.scala:738:34] wire [64:0] c_set_wo_ready = 65'h0; // @[Monitor.scala:739:34] wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46] wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76] wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71] wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42] wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42] wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42] wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123] wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117] wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48] wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48] wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123] wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119] wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48] wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48] wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34] wire [6:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // 
@[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_36 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_37 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_38 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_39 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_40 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_41 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_42 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_43 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_44 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_45 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_46 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_47 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_48 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_49 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_50 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_51 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] 
_uncommonBits_T_52 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_53 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_54 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_5 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_8 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_9 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire _source_ok_T = io_in_a_bits_source_0 == 7'h10; // @[Monitor.scala:36:7] wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] _source_ok_T_1 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_7 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_13 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_19 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire _source_ok_T_2 = _source_ok_T_1 == 5'h0; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_8 = _source_ok_T_7 == 5'h1; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_14 = _source_ok_T_13 == 5'h2; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_20 = _source_ok_T_19 == 5'h3; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31] wire [2:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[2:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] _source_ok_T_25 = io_in_a_bits_source_0[6:3]; // @[Monitor.scala:36:7] wire _source_ok_T_26 = _source_ok_T_25 == 4'h4; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_28 = _source_ok_T_26; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_30 = _source_ok_T_28; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_5 = _source_ok_T_30; // @[Parameters.scala:1138:31] wire _source_ok_T_31 = io_in_a_bits_source_0 == 7'h28; // @[Monitor.scala:36:7] wire _source_ok_WIRE_6 = _source_ok_T_31; // @[Parameters.scala:1138:31] wire _source_ok_T_32 = io_in_a_bits_source_0 == 7'h29; // @[Monitor.scala:36:7] wire _source_ok_WIRE_7 = _source_ok_T_32; // 
@[Parameters.scala:1138:31] wire _source_ok_T_33 = io_in_a_bits_source_0 == 7'h2A; // @[Monitor.scala:36:7] wire _source_ok_WIRE_8 = _source_ok_T_33; // @[Parameters.scala:1138:31] wire _source_ok_T_34 = io_in_a_bits_source_0 == 7'h40; // @[Monitor.scala:36:7] wire _source_ok_WIRE_9 = _source_ok_T_34; // @[Parameters.scala:1138:31] wire _source_ok_T_35 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_36 = _source_ok_T_35 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_37 = _source_ok_T_36 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_38 = _source_ok_T_37 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_39 = _source_ok_T_38 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_40 = _source_ok_T_39 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_41 = _source_ok_T_40 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_42 = _source_ok_T_41 | _source_ok_WIRE_8; // @[Parameters.scala:1138:31, :1139:46] wire source_ok = _source_ok_T_42 | _source_ok_WIRE_9; // @[Parameters.scala:1138:31, :1139:46] wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71] wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71] assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71] wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71] assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71] wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71] assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71] wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}] wire [11:0] _is_aligned_T = {6'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46] wire is_aligned = _is_aligned_T == 12'h0; // @[Edges.scala:21:{16,24}] wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49] wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}] wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27] wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21] wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26] wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26] wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}] wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}] wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26] wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26] wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; 
// @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}] wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26] wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26] wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20] wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}] wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}] wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}] wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}] wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}] wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10] wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_hi = {mask_hi_hi, 
mask_hi_lo}; // @[Misc.scala:222:10] wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10] wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_4 = _uncommonBits_T_4[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_5 = _uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_6 = _uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_7 = _uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_9 = _uncommonBits_T_9[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_10 = _uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_11 = _uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_12 = _uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_13 = _uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_14 = _uncommonBits_T_14[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_16 = _uncommonBits_T_16[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_17 = _uncommonBits_T_17[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_18 = _uncommonBits_T_18[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_19 = _uncommonBits_T_19[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_24 = _uncommonBits_T_24[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_25 = _uncommonBits_T_25[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_28 = _uncommonBits_T_28[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_29 = _uncommonBits_T_29[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_30 = _uncommonBits_T_30[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_31 = _uncommonBits_T_31[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_34 = _uncommonBits_T_34[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_36 = _uncommonBits_T_36[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_37 = _uncommonBits_T_37[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_38 = _uncommonBits_T_38[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_39 = 
_uncommonBits_T_39[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_40 = _uncommonBits_T_40[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_41 = _uncommonBits_T_41[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_42 = _uncommonBits_T_42[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_43 = _uncommonBits_T_43[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_44 = _uncommonBits_T_44[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_45 = _uncommonBits_T_45[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_46 = _uncommonBits_T_46[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_47 = _uncommonBits_T_47[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_48 = _uncommonBits_T_48[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_49 = _uncommonBits_T_49[2:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_50 = _uncommonBits_T_50[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_51 = _uncommonBits_T_51[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_52 = _uncommonBits_T_52[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_53 = _uncommonBits_T_53[1:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] uncommonBits_54 = _uncommonBits_T_54[2:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_43 = io_in_d_bits_source_0 == 7'h10; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_0 = _source_ok_T_43; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] _source_ok_T_44 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_50 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_56 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_62 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire _source_ok_T_45 = _source_ok_T_44 == 5'h0; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_47 = _source_ok_T_45; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_49 = _source_ok_T_47; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_1 = _source_ok_T_49; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_51 = _source_ok_T_50 == 5'h1; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_53 = _source_ok_T_51; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_55 = _source_ok_T_53; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_2 = _source_ok_T_55; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_57 = _source_ok_T_56 == 5'h2; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_59 = _source_ok_T_57; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_61 = _source_ok_T_59; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_3 = _source_ok_T_61; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_8 = _source_ok_uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_63 = _source_ok_T_62 == 5'h3; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_65 = _source_ok_T_63; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_67 = _source_ok_T_65; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_4 = _source_ok_T_67; // @[Parameters.scala:1138:31] wire [2:0] source_ok_uncommonBits_9 
= _source_ok_uncommonBits_T_9[2:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] _source_ok_T_68 = io_in_d_bits_source_0[6:3]; // @[Monitor.scala:36:7] wire _source_ok_T_69 = _source_ok_T_68 == 4'h4; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_71 = _source_ok_T_69; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_73 = _source_ok_T_71; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_5 = _source_ok_T_73; // @[Parameters.scala:1138:31] wire _source_ok_T_74 = io_in_d_bits_source_0 == 7'h28; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_6 = _source_ok_T_74; // @[Parameters.scala:1138:31] wire _source_ok_T_75 = io_in_d_bits_source_0 == 7'h29; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_7 = _source_ok_T_75; // @[Parameters.scala:1138:31] wire _source_ok_T_76 = io_in_d_bits_source_0 == 7'h2A; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_8 = _source_ok_T_76; // @[Parameters.scala:1138:31] wire _source_ok_T_77 = io_in_d_bits_source_0 == 7'h40; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_9 = _source_ok_T_77; // @[Parameters.scala:1138:31] wire _source_ok_T_78 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_79 = _source_ok_T_78 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_80 = _source_ok_T_79 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_81 = _source_ok_T_80 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_82 = _source_ok_T_81 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_83 = _source_ok_T_82 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_84 = _source_ok_T_83 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_85 = _source_ok_T_84 | _source_ok_WIRE_1_8; // @[Parameters.scala:1138:31, :1139:46] wire source_ok_1 = _source_ok_T_85 | _source_ok_WIRE_1_9; // @[Parameters.scala:1138:31, :1139:46] wire _T_1180 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35] wire _a_first_T; // @[Decoupled.scala:51:35] assign _a_first_T = _T_1180; // @[Decoupled.scala:51:35] wire _a_first_T_1; // @[Decoupled.scala:51:35] assign _a_first_T_1 = _T_1180; // @[Decoupled.scala:51:35] wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46] wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}] wire [2:0] a_first_beats1 = a_first_beats1_opdata ? 
a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [2:0] a_first_counter; // @[Edges.scala:229:27] wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28] wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35] wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27] wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [2:0] size; // @[Monitor.scala:389:22] reg [6:0] source; // @[Monitor.scala:390:22] reg [11:0] address; // @[Monitor.scala:391:22] wire _T_1248 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35] wire _d_first_T; // @[Decoupled.scala:51:35] assign _d_first_T = _T_1248; // @[Decoupled.scala:51:35] wire _d_first_T_1; // @[Decoupled.scala:51:35] assign _d_first_T_1 = _T_1248; // @[Decoupled.scala:51:35] wire _d_first_T_2; // @[Decoupled.scala:51:35] assign _d_first_T_2 = _T_1248; // @[Decoupled.scala:51:35] wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71] assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71] assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71] assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71] wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46] wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire [2:0] d_first_beats1 = d_first_beats1_opdata ? 
d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28] wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [2:0] size_1; // @[Monitor.scala:540:22] reg [6:0] source_1; // @[Monitor.scala:541:22] reg [64:0] inflight; // @[Monitor.scala:614:27] reg [259:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [259:0] inflight_sizes; // @[Monitor.scala:618:33] wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46] wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}] wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [2:0] a_first_counter_1; // @[Edges.scala:229:27] wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28] wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46] wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? 
d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter_1; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28] wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [64:0] a_set; // @[Monitor.scala:626:34] wire [64:0] a_set_wo_ready; // @[Monitor.scala:627:34] wire [259:0] a_opcodes_set; // @[Monitor.scala:630:33] wire [259:0] a_sizes_set; // @[Monitor.scala:632:31] wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35] wire [9:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69] wire [9:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69] assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69] wire [9:0] _a_size_lookup_T; // @[Monitor.scala:641:65] assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65] wire [9:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101] assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101] wire [9:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99] assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99] wire [9:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69] assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69] wire [9:0] _c_size_lookup_T; // @[Monitor.scala:750:67] assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67] wire [9:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101] assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101] wire [9:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99] assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99] wire [259:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}] wire [259:0] _a_opcode_lookup_T_6 = {256'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}] wire [259:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:637:{97,152}] assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}] wire [3:0] a_size_lookup; // @[Monitor.scala:639:33] wire [259:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}] wire [259:0] _a_size_lookup_T_6 = {256'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}] wire [259:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[259:1]}; // @[Monitor.scala:641:{91,144}] assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}] wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40] wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38] wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44] wire 
[127:0] _GEN_2 = 128'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35] wire [127:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35] assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35] wire [127:0] _a_set_T; // @[OneHot.scala:58:35] assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35] assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35] wire _T_1113 = _T_1180 & a_first_1; // @[Decoupled.scala:51:35] assign a_set = _T_1113 ? _a_set_T[64:0] : 65'h0; // @[OneHot.scala:58:35] wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53] wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}] assign a_opcodes_set_interm = _T_1113 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}] wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51] wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}] assign a_sizes_set_interm = _T_1113 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}] wire [9:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79] wire [9:0] _a_opcodes_set_T; // @[Monitor.scala:659:79] assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79] wire [9:0] _a_sizes_set_T; // @[Monitor.scala:660:77] assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77] wire [1026:0] _a_opcodes_set_T_1 = {1023'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}] assign a_opcodes_set = _T_1113 ? _a_opcodes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}] wire [1026:0] _a_sizes_set_T_1 = {1023'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}] assign a_sizes_set = _T_1113 ? _a_sizes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}] wire [64:0] d_clr; // @[Monitor.scala:664:34] wire [64:0] d_clr_wo_ready; // @[Monitor.scala:665:34] wire [259:0] d_opcodes_clr; // @[Monitor.scala:668:33] wire [259:0] d_sizes_clr; // @[Monitor.scala:670:31] wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46] wire d_release_ack; // @[Monitor.scala:673:46] assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46] wire d_release_ack_1; // @[Monitor.scala:783:46] assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46] wire _T_1159 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26] wire [127:0] _GEN_5 = 128'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35] wire [127:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35] wire [127:0] _d_clr_T; // @[OneHot.scala:58:35] assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35] wire [127:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35] wire [127:0] _d_clr_T_1; // @[OneHot.scala:58:35] assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35] assign d_clr_wo_ready = _T_1159 & ~d_release_ack ? _d_clr_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35] wire _T_1128 = _T_1248 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35] assign d_clr = _T_1128 ? _d_clr_T[64:0] : 65'h0; // @[OneHot.scala:58:35] wire [1038:0] _d_opcodes_clr_T_5 = 1039'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}] assign d_opcodes_clr = _T_1128 ? 
_d_opcodes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}] wire [1038:0] _d_sizes_clr_T_5 = 1039'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}] assign d_sizes_clr = _T_1128 ? _d_sizes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}] wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}] wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113] wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}] wire [64:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27] wire [64:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38] wire [64:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}] wire [259:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43] wire [259:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62] wire [259:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}] wire [259:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39] wire [259:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56] wire [259:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}] reg [31:0] watchdog; // @[Monitor.scala:709:27] wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26] wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26] reg [64:0] inflight_1; // @[Monitor.scala:726:35] wire [64:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35] reg [259:0] inflight_opcodes_1; // @[Monitor.scala:727:35] wire [259:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43] reg [259:0] inflight_sizes_1; // @[Monitor.scala:728:35] wire [259:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41] wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46] wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter_2; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28] wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}] wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T_2 = d_first_2 ? 
d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35] wire [3:0] c_size_lookup; // @[Monitor.scala:748:35] wire [259:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}] wire [259:0] _c_opcode_lookup_T_6 = {256'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}] wire [259:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:749:{97,152}] assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}] wire [259:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}] wire [259:0] _c_size_lookup_T_6 = {256'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}] wire [259:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[259:1]}; // @[Monitor.scala:750:{93,146}] assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}] wire [64:0] d_clr_1; // @[Monitor.scala:774:34] wire [64:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34] wire [259:0] d_opcodes_clr_1; // @[Monitor.scala:776:34] wire [259:0] d_sizes_clr_1; // @[Monitor.scala:777:34] wire _T_1224 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26] assign d_clr_wo_ready_1 = _T_1224 & d_release_ack_1 ? _d_clr_wo_ready_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35] wire _T_1206 = _T_1248 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35] assign d_clr_1 = _T_1206 ? _d_clr_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35] wire [1038:0] _d_opcodes_clr_T_11 = 1039'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}] assign d_opcodes_clr_1 = _T_1206 ? _d_opcodes_clr_T_11[259:0] : 260'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}] wire [1038:0] _d_sizes_clr_T_11 = 1039'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}] assign d_sizes_clr_1 = _T_1206 ? _d_sizes_clr_T_11[259:0] : 260'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}] wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 7'h0; // @[Monitor.scala:36:7, :795:113] wire [64:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46] wire [64:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}] wire [259:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62] wire [259:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}] wire [259:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58] wire [259:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
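The register and one-hot set/clear wires in the Verilog above (inflight, a_set, d_clr, watchdog) are the flattened output of the TileLink monitor's in-flight source tracking. As a rough orientation only, here is a minimal self-contained Chisel sketch of that idiom, not the emitted module itself; the module name and ports are illustrative, with endSourceId defaulting to 65 to match the [64:0] inflight register above.

import chisel3._
import chisel3.util._

class InflightTrackerSketch(endSourceId: Int = 65) extends Module {
  val io = IO(new Bundle {
    val a_fire   = Input(Bool())
    val a_source = Input(UInt(log2Ceil(endSourceId).W))
    val d_fire   = Input(Bool())
    val d_source = Input(UInt(log2Ceil(endSourceId).W))
  })
  // One bit per source ID, like the 65-bit `inflight` register in the Verilog above.
  val inflight = RegInit(0.U(endSourceId.W))
  // One-hot set on an accepted 'A' beat, one-hot clear on the matching 'D' beat,
  // mirroring the `a_set` / `d_clr` wires built from `1 << source`.
  val a_set = Mux(io.a_fire, UIntToOH(io.a_source, endSourceId), 0.U)
  val d_clr = Mux(io.d_fire, UIntToOH(io.d_source, endSourceId), 0.U)
  inflight := (inflight | a_set) & ~d_clr
  // Free-running watchdog, cleared whenever either channel fires (cf. `watchdog` above).
  val watchdog = RegInit(0.U(32.W))
  watchdog := Mux(io.a_fire || io.d_fire, 0.U, watchdog + 1.U)
}

The real monitor additionally shifts per-source opcode and size fields into wide inflight_opcodes / inflight_sizes registers, which is where the 260-bit vectors above come from.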
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: package constellation.channel import chisel3._ import chisel3.util._ import freechips.rocketchip.diplomacy._ import org.chipsalliance.cde.config.{Parameters} import freechips.rocketchip.util._ import constellation.noc.{HasNoCParams} class NoCMonitor(val cParam: ChannelParams)(implicit val p: Parameters) extends Module with HasNoCParams { val io = IO(new Bundle { val in = Input(new Channel(cParam)) }) val in_flight = RegInit(VecInit(Seq.fill(cParam.nVirtualChannels) { false.B })) for (i <- 0 until cParam.srcSpeedup) { val flit = io.in.flit(i) when (flit.valid) { when (flit.bits.head) { in_flight(flit.bits.virt_channel_id) := true.B assert (!in_flight(flit.bits.virt_channel_id), "Flit head/tail sequencing is broken") } when (flit.bits.tail) { in_flight(flit.bits.virt_channel_id) := false.B } } val possibleFlows = cParam.possibleFlows when (flit.valid && flit.bits.head) { cParam match { case n: ChannelParams => n.virtualChannelParams.zipWithIndex.foreach { case (v,i) => assert(flit.bits.virt_channel_id =/= i.U || v.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR) } case _ => assert(cParam.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR) } } } } File Types.scala: package constellation.routing import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.{Parameters} import constellation.noc.{HasNoCParams} import constellation.channel.{Flit} /** A representation for 1 specific virtual channel in wormhole routing * * @param src the source node * @param vc ID for the virtual channel * @param dst the destination node * @param n_vc the number of virtual channels */ // BEGIN: ChannelRoutingInfo case class ChannelRoutingInfo( src: Int, dst: Int, vc: Int, n_vc: Int ) { // END: ChannelRoutingInfo require (src >= -1 && dst >= -1 && vc >= 0, s"Illegal $this") require (!(src == -1 && dst == -1), s"Illegal $this") require (vc < n_vc, s"Illegal $this") val isIngress = src == -1 val isEgress = dst == -1 } /** Represents the properties of a packet that are relevant for routing * ingressId and egressId uniquely identify a flow, but vnet and dst are used here * to simplify the implementation of routingrelations * * @param ingressId packet's source ingress point * @param egressId packet's destination egress point * @param vNet virtual subnetwork identifier * @param dst packet's destination node ID */ // BEGIN: FlowRoutingInfo case class FlowRoutingInfo( ingressId: Int, egressId: Int, vNetId: Int, ingressNode: Int, ingressNodeId: Int, egressNode: Int, egressNodeId: Int, fifo: Boolean ) { // END: FlowRoutingInfo def isFlow(f: FlowRoutingBundle): Bool = { (f.ingress_node === ingressNode.U && f.egress_node === egressNode.U && f.ingress_node_id === ingressNodeId.U && f.egress_node_id === egressNodeId.U) } def asLiteral(b: FlowRoutingBundle): BigInt = { Seq( (vNetId , b.vnet_id), (ingressNode , b.ingress_node), (ingressNodeId , b.ingress_node_id), (egressNode , b.egress_node), (egressNodeId , b.egress_node_id) ).foldLeft(0)((l, t) => { (l << t._2.getWidth) | t._1 }) } } class FlowRoutingBundle(implicit val p: Parameters) extends Bundle with HasNoCParams { // Instead of tracking ingress/egress ID, track the physical destination id and the offset at the destination // This simplifies the routing tables val vnet_id = UInt(log2Ceil(nVirtualNetworks).W) val ingress_node = UInt(log2Ceil(nNodes).W) val ingress_node_id = UInt(log2Ceil(maxIngressesAtNode).W) val egress_node = UInt(log2Ceil(nNodes).W) val 
egress_node_id = UInt(log2Ceil(maxEgressesAtNode).W) }
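Because possibleFlows is a Scala-level collection of FlowRoutingInfo case classes, the assertion in NoCMonitor elaborates into a fixed OR of equality comparisons produced by isFlow; no routing table exists in hardware. A minimal self-contained sketch of that check follows (the 4-bit node widths match the generated module below, but the flow values themselves are made up for illustration):

import chisel3._
import chisel3.util._

class FlowCheckSketch extends Module {
  val io = IO(new Bundle {
    val valid        = Input(Bool())
    val head         = Input(Bool())
    val ingress_node = Input(UInt(4.W))
    val egress_node  = Input(UInt(4.W))
  })
  // Statically known legal (ingressNode, egressNode) pairs for one virtual channel.
  // In the real design these come from v.possibleFlows, and isFlow also compares the
  // ingress/egress node-id offsets.
  val legalFlows = Seq((2, 5), (2, 7))
  val flowOk = legalFlows.map { case (src, dst) =>
    io.ingress_node === src.U && io.egress_node === dst.U
  }.reduce(_ || _)
  assert(!(io.valid && io.head) || flowOk, "Illegal flow for this virtual channel")
}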
module NoCMonitor_7( // @[Monitor.scala:11:7] input clock, // @[Monitor.scala:11:7] input reset, // @[Monitor.scala:11:7] input io_in_flit_0_valid, // @[Monitor.scala:12:14] input io_in_flit_0_bits_head, // @[Monitor.scala:12:14] input io_in_flit_0_bits_tail, // @[Monitor.scala:12:14] input [3:0] io_in_flit_0_bits_flow_ingress_node, // @[Monitor.scala:12:14] input [2:0] io_in_flit_0_bits_flow_ingress_node_id, // @[Monitor.scala:12:14] input [3:0] io_in_flit_0_bits_flow_egress_node, // @[Monitor.scala:12:14] input [1:0] io_in_flit_0_bits_flow_egress_node_id, // @[Monitor.scala:12:14] input [1:0] io_in_flit_0_bits_virt_channel_id // @[Monitor.scala:12:14] ); reg in_flight_0; // @[Monitor.scala:16:26] reg in_flight_1; // @[Monitor.scala:16:26] reg in_flight_2; // @[Monitor.scala:16:26] wire _GEN = io_in_flit_0_bits_virt_channel_id == 2'h1; // @[Monitor.scala:21:46] wire _GEN_0 = io_in_flit_0_bits_virt_channel_id == 2'h2; // @[Monitor.scala:21:46]
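The three in_flight_* registers above are the flattened form of the monitor's RegInit(VecInit(...)) vector, and the _GEN wires are the decoded virt_channel_id comparisons that feed its write enables and assertions. A minimal standalone sketch of that head/tail sequencing check, assuming three virtual channels as in this instance (module and signal names are illustrative):

import chisel3._
import chisel3.util._

class HeadTailCheckSketch(nVirtualChannels: Int = 3) extends Module {
  val io = IO(new Bundle {
    val valid   = Input(Bool())
    val head    = Input(Bool())
    val tail    = Input(Bool())
    val virt_id = Input(UInt(log2Ceil(nVirtualChannels).W))
  })
  // Elaborates to one flop per virtual channel (cf. in_flight_0 .. in_flight_2 above).
  val inFlight = RegInit(VecInit(Seq.fill(nVirtualChannels)(false.B)))
  when (io.valid) {
    when (io.head) {
      inFlight(io.virt_id) := true.B
      // A head flit must not arrive while the channel is already mid-packet.
      assert(!inFlight(io.virt_id), "Flit head/tail sequencing is broken")
    }
    // When head and tail coincide (single-flit packet), this later write wins.
    when (io.tail) { inFlight(io.virt_id) := false.B }
  }
}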
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed within multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel address changed within multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed version of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal :=
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID for which a response message " + "is already pending (not received until current cycle) for a prior request message " + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the " + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the " + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be the same as a_size of the corresponding request " + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding " + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed within multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
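// Editor's note (illustrative sketch, not part of the upstream monitor): the registers updated here
// track outstanding A=>D transactions per source ID. a_set and d_clr are one-hot in the source, so
// `inflight` keeps one pending bit per source; the packed opcode/size tables that follow store each
// entry as (value << 1) | 1, e.g. a Get (opcode 4) of size 3 is recorded as 0b1001 and 0b0111, so an
// all-zero slot can only mean "nothing in flight", and the lookups shift right by 1 to drop that marker bit.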
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // An potentially empty inclusive range of 2-powers [min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max, 
s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask gives the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ... 
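// Editor's sketch (illustrative worked example, not part of the original file): with base=0x1000 and
// mask=0xf0f the containment test below is ((x ^ base) & ~mask) == 0. For x = 0x1103,
// (0x1103 ^ 0x1000) & ~0xf0f = 0x103 & ~0xf0f = 0, so the address is contained; for x = 0x1013,
// (0x13 & ~0xf0f) = 0x10 != 0, so it is not. The mask therefore marks exactly the address bits the
// manager ignores when matching.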
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => 
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := 
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
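The edge helpers above are normally used from a diplomatic client rather than by assembling TLBundleA fields by hand. A minimal sketch of that pattern follows; it is not part of the sources above, and the ExampleGetClient name, the fixed 0x1000 address, and the exact diplomacy imports are illustrative assumptions that may need adjusting to the rocket-chip version in use.

import chisel3._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._

class ExampleGetClient(implicit p: Parameters) extends LazyModule {
  val node = TLClientNode(Seq(TLMasterPortParameters.v1(
    Seq(TLMasterParameters.v1(name = "exampleGetClient", sourceId = IdRange(0, 1))))))

  lazy val module = new LazyModuleImp(this) {
    val (out, edge) = node.out(0)   // edge is the TLEdgeOut for this port

    // Build a single-beat, 8-byte (lgSize = 3) Get at an assumed fixed address.
    val (legal, get) = edge.Get(fromSource = 0.U, toAddress = 0x1000.U, lgSize = 3.U)

    out.a.valid := legal      // `legal` checks address/size against the reachable managers
    out.a.bits  := get
    out.d.ready := true.B     // sink the AccessAckData response
  }
}

The sketch only shows the API shape: `legal` is computed from the statically known manager parameters, not from any handshake, and a real design would gate out.a.valid with its own request state and needs a manager on the other side of the edge that actually supports Get.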
module TLMonitor_13( // @[Monitor.scala:36:7]
  input         clock, // @[Monitor.scala:36:7]
  input         reset, // @[Monitor.scala:36:7]
  input         io_in_a_ready, // @[Monitor.scala:20:14]
  input         io_in_a_valid, // @[Monitor.scala:20:14]
  input  [2:0]  io_in_a_bits_opcode, // @[Monitor.scala:20:14]
  input  [3:0]  io_in_a_bits_size, // @[Monitor.scala:20:14]
  input  [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
  input         io_in_d_ready, // @[Monitor.scala:20:14]
  input         io_in_d_valid, // @[Monitor.scala:20:14]
  input  [2:0]  io_in_d_bits_opcode, // @[Monitor.scala:20:14]
  input  [1:0]  io_in_d_bits_param, // @[Monitor.scala:20:14]
  input  [3:0]  io_in_d_bits_size, // @[Monitor.scala:20:14]
  input  [2:0]  io_in_d_bits_sink, // @[Monitor.scala:20:14]
  input         io_in_d_bits_denied, // @[Monitor.scala:20:14]
  input         io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);

  wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
  wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
  wire [26:0] _GEN = {23'h0, io_in_a_bits_size}; // @[package.scala:243:71]
  wire        _a_first_T_1 = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35]
  reg  [11:0] a_first_counter; // @[Edges.scala:229:27]
  reg  [2:0]  opcode; // @[Monitor.scala:387:22]
  reg  [3:0]  size; // @[Monitor.scala:389:22]
  reg  [31:0] address; // @[Monitor.scala:391:22]
  reg  [11:0] d_first_counter; // @[Edges.scala:229:27]
  reg  [2:0]  opcode_1; // @[Monitor.scala:538:22]
  reg  [1:0]  param_1; // @[Monitor.scala:539:22]
  reg  [3:0]  size_1; // @[Monitor.scala:540:22]
  reg         source_1; // @[Monitor.scala:541:22]
  reg  [2:0]  sink; // @[Monitor.scala:542:22]
  reg         denied; // @[Monitor.scala:543:22]
  reg  [1:0]  inflight; // @[Monitor.scala:614:27]
  reg  [3:0]  inflight_opcodes; // @[Monitor.scala:616:35]
  reg  [7:0]  inflight_sizes; // @[Monitor.scala:618:33]
  reg  [11:0] a_first_counter_1; // @[Edges.scala:229:27]
  wire        a_first_1 = a_first_counter_1 == 12'h0; // @[Edges.scala:229:27, :231:25]
  reg  [11:0] d_first_counter_1; // @[Edges.scala:229:27]
  wire        d_first_1 = d_first_counter_1 == 12'h0; // @[Edges.scala:229:27, :231:25]
  wire        a_set = _a_first_T_1 & a_first_1; // @[Decoupled.scala:51:35]
  wire        d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:36:7, :673:46]
  wire        _GEN_0 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:36:7, :673:46, :674:74]
  reg  [31:0] watchdog; // @[Monitor.scala:709:27]
  reg  [1:0]  inflight_1; // @[Monitor.scala:726:35]
  reg  [7:0]  inflight_sizes_1; // @[Monitor.scala:728:35]
  reg  [11:0] d_first_counter_2; // @[Edges.scala:229:27]
  wire        d_first_2 = d_first_counter_2 == 12'h0; // @[Edges.scala:229:27, :231:25]
  reg  [31:0] watchdog_1; // @[Monitor.scala:818:27]
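The a_first_counter / d_first_counter registers above come from the edge's beat tracking (Edges.scala:229); the monitor elaborates one such counter per edge.first call, which is why _1/_2 copies appear. A simplified, self-contained sketch of that idea, assuming a hypothetical FirstBeatTracker module and the 12-bit counter width seen in the generated code:

import chisel3._

class FirstBeatTracker extends Module {
  val io = IO(new Bundle {
    val fire      = Input(Bool())      // a beat is accepted this cycle
    val numBeats1 = Input(UInt(12.W))  // beats in the current burst, minus one
    val first     = Output(Bool())
    val last      = Output(Bool())
  })
  val counter = RegInit(0.U(12.W))
  io.first := counter === 0.U                              // idle or just started => first beat
  io.last  := counter === 1.U || io.numBeats1 === 0.U      // final beat of a burst, or a single-beat message
  when (io.fire) {
    counter := Mux(io.first, io.numBeats1, counter - 1.U)  // reload on the first beat, otherwise count down
  }
}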
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
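    // Each `when` block below handles one A-channel opcode: it checks that the
    // diplomatic master/slave parameters actually allow the message at this
    // address and size, that the source ID is legal and the address aligned,
    // and that param, mask, and corrupt are encoded as TileLink requires.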
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
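    // Per-source bookkeeping: the first beat of an accepted A request sets that
    // source's bit in `inflight` and stores (opcode << 1) | 1 and (size << 1) | 1
    // into its slot of inflight_opcodes / inflight_sizes, so an all-zero slot
    // means "nothing outstanding". The first beat of the matching (non-ReleaseAck)
    // D response clears the same slot, and the *_lookup wires above shift the
    // stored value back down by one to recover the original opcode and size.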
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
        (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
      }
    }
  }

  // allow bitwise ops on Seq[Bool] just like UInt
  implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
    def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
    def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
    def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
    def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
    def >> (n: Int): Seq[Bool] = x drop n
    def unary_~ : Seq[Bool] = x.map(!_)
    def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
    def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
    def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)

    private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
  }

  implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
    def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))

    def getElements: Seq[Element] = x match {
      case e: Element => Seq(e)
      case a: Aggregate => a.getElements.flatMap(_.getElements)
    }
  }

  /** Any Data subtype that has a Bool member named valid. */
  type DataCanBeValid = Data { val valid: Bool }

  implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
    def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
  }

  implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to underscores, also removing all spaces */
    def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
      case (acc, c) if c.isUpper => acc + "_" + c.toLower
      case (acc, c) if c == ' ' => acc
      case (acc, c) => acc + c
    }

    /** converts spaces or underscores to hyphens, also lowering case */
    def kebab: String = x.toLowerCase map {
      case ' ' => '-'
      case '_' => '-'
      case c => c
    }

    def named(name: Option[String]): String = {
      x + name.map("_named_" + _ ).getOrElse("_with_no_name")
    }

    def named(name: String): String = named(Some(name))
  }

  implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
  implicit def wcToUInt(c: WideCounter): UInt = c.value

  implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
    def sextTo(n: Int): UInt = {
      require(x.getWidth <= n)
      if (x.getWidth == n) x
      else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
    }

    def padTo(n: Int): UInt = {
      require(x.getWidth <= n)
      if (x.getWidth == n) x
      else Cat(0.U((n - x.getWidth).W), x)
    }

    // shifts left by n if n >= 0, or right by -n if n < 0
    def << (n: SInt): UInt = {
      val w = n.getWidth - 1
      require(w <= 30)

      val shifted = x << n(w-1, 0)
      Mux(n(w), shifted >> (1 << w), shifted)
    }

    // shifts right by n if n >= 0, or left by -n if n < 0
    def >> (n: SInt): UInt = {
      val w = n.getWidth - 1
      require(w <= 30)

      val shifted = x << (1 << w) >> n(w-1, 0)
      Mux(n(w), shifted, shifted >> (1 << w))
    }

    // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
    def extract(hi: Int, lo: Int): UInt = {
      require(hi >= lo-1)
      if (hi == lo-1) 0.U
      else x(hi, lo)
    }

    // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
    def extractOption(hi: Int, lo: Int): Option[UInt] = {
      require(hi >= lo-1)
      if (hi == lo-1) None
      else Some(x(hi, lo))
    }

    // like x & ~y, but first truncate or zero-extend y to x's width
    def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))

    def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)

    def rotateRight(n: UInt): UInt = {
      if (x.getWidth <= 1) {
        x
      } else {
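        // One Mux stage per bit of the rotate amount: stage i rotates by 2^i
        // when amt(i) is set, so a W-bit value needs only log2Ceil(W) stages
        // rather than a full W-way barrel.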
        val amt = n.padTo(log2Ceil(x.getWidth))
        (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
      }
    }

    def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))

    def rotateLeft(n: UInt): UInt = {
      if (x.getWidth <= 1) {
        x
      } else {
        val amt = n.padTo(log2Ceil(x.getWidth))
        (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
      }
    }

    // compute (this + y) % n, given (this < n) and (y < n)
    def addWrap(y: UInt, n: Int): UInt = {
      val z = x +& y
      if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
    }

    // compute (this - y) % n, given (this < n) and (y < n)
    def subWrap(y: UInt, n: Int): UInt = {
      val z = x -& y
      if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
    }

    def grouped(width: Int): Seq[UInt] =
      (0 until x.getWidth by width).map(base => x(base + width - 1, base))

    def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds

    def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)

    // Like >=, but prevents x-prop for ('x >= 0)
    def >== (y: UInt): Bool = x >= y || y === 0.U
  }

  implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
    def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
    def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
  }

  implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
    def toInt: Int = if (x) 1 else 0

    // this one's snagged from scalaz
    def option[T](z: => T): Option[T] = if (x) Some(z) else None
  }

  implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
    // exact log2
    def log2: Int = {
      require(isPow2(x))
      log2Ceil(x)
    }
  }

  def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
  def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
  def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
  def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)

  def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None

  // Fill 1s from low bits to high bits
  def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
  def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
    val stop = min(width, cap)
    def helper(s: Int, x: UInt): UInt =
      if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
    helper(1, x)(width-1, 0)
  }

  // Fill 1s from high bits to low bits
  def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
  def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
    val stop = min(width, cap)
    def helper(s: Int, x: UInt): UInt =
      if (s >= stop) x else helper(s+s, x | (x >> s))
    helper(1, x)(width-1, 0)
  }

  def OptimizationBarrier[T <: Data](in: T): T = {
    val barrier = Module(new Module {
      val io = IO(new Bundle {
        val x = Input(chiselTypeOf(in))
        val y = Output(chiselTypeOf(in))
      })
      io.y := io.x
      override def desiredName = s"OptimizationBarrier_${in.typeName}"
    })
    barrier.io.x := in
    barrier.io.y
  }

  /** Similar to Seq.groupBy except this returns a Seq instead of a Map
    * Useful for deterministic code generation
    */
  def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
    val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
    for (x <- xs) {
      val key = f(x)
      val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
      l += x
    }
    map.view.map({ case (k, vs) => k -> vs.toList }).toList
  }

  def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
    case 1 => List.fill(n)(in.head)
    case x if x == n => in
    case _ =>
      throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
  }

  // HeterogeneousBag moved to standalone diplomacy
  @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
  def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
  @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
  val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}

File Parameters.scala:
// See LICENSE.SiFive for license details.

package freechips.rocketchip.diplomacy

import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}

import freechips.rocketchip.util.ShiftQueue

/** Options for describing the attributes of memory regions */
object RegionType {
  // Define the 'more relaxed than' ordering
  val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
  sealed trait T extends Ordered[T] {
    def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
  }

  case object CACHED      extends T // an intermediate agent may have cached a copy of the region for you
  case object TRACKED     extends T // the region may have been cached by another master, but coherence is being provided
  case object UNCACHED    extends T // the region has not been cached yet, but should be cached when possible
  case object IDEMPOTENT  extends T // gets return most recently put content, but content should not be cached
  case object VOLATILE    extends T // content may change without a put, but puts and gets have no side effects
  case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
  case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}

// A non-empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange] {
  require (start >= 0, s"Ids cannot be negative, but got: $start.")
  require (start <= end, "Id ranges cannot be negative.")

  def compare(x: IdRange) = {
    val primary   = (this.start - x.start).signum
    val secondary = (x.end - this.end).signum
    if (primary != 0) primary else secondary
  }

  def overlaps(x: IdRange) = start < x.end && x.start < end
  def contains(x: IdRange) = start <= x.start && x.end <= end

  def contains(x: Int)  = start <= x && x < end
  def contains(x: UInt) =
    if (size == 0) {
      false.B
    } else if (size == 1) {
      // simple comparison
      x === start.U
    } else {
      // find index of largest different bit
      val largestDeltaBit = log2Floor(start ^ (end-1))
      val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
      val uncommonMask = (1 << smallestCommonBit) - 1
      val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
      // the prefix must match exactly (note: may shift ALL bits away)
      (x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
      // firrtl constant prop range analysis can eliminate these two:
      (start & uncommonMask).U <= uncommonBits &&
      uncommonBits <= ((end-1) & uncommonMask).U
    }

  def shift(x: Int) = IdRange(start+x, end+x)
  def size = end - start
  def isEmpty = end == start

  def range = start until end
}

object IdRange {
  def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
    val ranges = s.sorted
    (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
  }
}

// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int) {
  def this(x: Int) = this(x, x)

  require (min <= max,
s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask are the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f decribes a device managing 0x1000-0x100f, 0x1100-0x110f, ... 
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
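        // (Put and Atomic messages carry data; Get, Hint, and Acquire do not.
        //  When the visible managers can only legally receive one of those two
        //  groups, hasData() below collapses to an elaboration-time constant.)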
        val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
        // Do there exist A messages without Data?
        val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
        // Statically optimize the case where hasData is a constant
        if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
      }
      case _:TLBundleB => {
        // Do there exist B messages with Data?
        val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
        // Do there exist B messages without Data?
        val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
        // Statically optimize the case where hasData is a constant
        if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
      }
      case _:TLBundleC => {
        // Do there exist C messages with Data?
        val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
        // Do there exist C messages without Data?
        val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
        if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
      }
      case _:TLBundleD => {
        // Do there exist D messages with Data?
        val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
        // Do there exist D messages without Data?
        val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
        if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
      }
      case _:TLBundleE => Some(false)
    }
  }

  def isRequest(x: TLChannel): Bool = {
    x match {
      case a: TLBundleA => true.B
      case b: TLBundleB => true.B
      case c: TLBundleC => c.opcode(2) && c.opcode(1)
        // opcode === TLMessages.Release ||
        // opcode === TLMessages.ReleaseData
      case d: TLBundleD => d.opcode(2) && !d.opcode(1)
        // opcode === TLMessages.Grant ||
        // opcode === TLMessages.GrantData
      case e: TLBundleE => false.B
    }
  }

  def isResponse(x: TLChannel): Bool = {
    x match {
      case a: TLBundleA => false.B
      case b: TLBundleB => false.B
      case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
        // opcode =/= TLMessages.Release &&
        // opcode =/= TLMessages.ReleaseData
      case d: TLBundleD => true.B // Grant isResponse + isRequest
      case e: TLBundleE => true.B
    }
  }

  def hasData(x: TLChannel): Bool = {
    val opdata = x match {
      case a: TLBundleA => !a.opcode(2)
        // opcode === TLMessages.PutFullData    ||
        // opcode === TLMessages.PutPartialData ||
        // opcode === TLMessages.ArithmeticData ||
        // opcode === TLMessages.LogicalData
      case b: TLBundleB => !b.opcode(2)
        // opcode === TLMessages.PutFullData    ||
        // opcode === TLMessages.PutPartialData ||
        // opcode === TLMessages.ArithmeticData ||
        // opcode === TLMessages.LogicalData
      case c: TLBundleC => c.opcode(0)
        // opcode === TLMessages.AccessAckData ||
        // opcode === TLMessages.ProbeAckData  ||
        // opcode === TLMessages.ReleaseData
      case d: TLBundleD => d.opcode(0)
        // opcode === TLMessages.AccessAckData ||
        // opcode === TLMessages.GrantData
      case e: TLBundleE => false.B
    }
    staticHasData(x).map(_.B).getOrElse(opdata)
  }

  def opcode(x: TLDataChannel): UInt = {
    x match {
      case a: TLBundleA => a.opcode
      case b: TLBundleB => b.opcode
      case c: TLBundleC => c.opcode
      case d: TLBundleD => d.opcode
    }
  }

  def param(x: TLDataChannel): UInt = {
    x match {
      case a: TLBundleA => a.param
      case b: TLBundleB => b.param
      case c: TLBundleC => c.param
      case d: TLBundleD =>
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := 
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
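For reference, a minimal sketch of how a client typically drives the edge helpers defined above from inside a LazyModuleImp; the node handle and the signals wantRead and addr are illustrative and not part of the files above:

// Hypothetical client logic, assuming (out, edge) = node.out.head and an 8-byte read at 'addr'.
val (legal, get) = edge.Get(fromSource = 0.U, toAddress = addr, lgSize = 3.U)
out.a.valid := wantRead && legal  // only issue the Get where a reachable manager supports it
out.a.bits  := get
out.d.ready := true.B             // the AccessAckData returns on channel D with the same source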
module TLMonitor_49( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [1:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [10:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [25:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [10:0] io_in_d_bits_source // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire a_first_done = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35] reg a_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [1:0] size; // @[Monitor.scala:389:22] reg [10:0] source; // @[Monitor.scala:390:22] reg [25:0] address; // @[Monitor.scala:391:22] reg d_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] size_1; // @[Monitor.scala:540:22] reg [10:0] source_1; // @[Monitor.scala:541:22] reg [1039:0] inflight; // @[Monitor.scala:614:27] reg [4159:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [4159:0] inflight_sizes; // @[Monitor.scala:618:33] reg a_first_counter_1; // @[Edges.scala:229:27] reg d_first_counter_1; // @[Edges.scala:229:27] wire [2047:0] _GEN = {2037'h0, io_in_a_bits_source}; // @[OneHot.scala:58:35] wire _GEN_0 = a_first_done & ~a_first_counter_1; // @[Decoupled.scala:51:35] wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46] wire _GEN_1 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74] wire [2047:0] _GEN_2 = {2037'h0, io_in_d_bits_source}; // @[OneHot.scala:58:35] reg [31:0] watchdog; // @[Monitor.scala:709:27] reg [1039:0] inflight_1; // @[Monitor.scala:726:35] reg [4159:0] inflight_sizes_1; // @[Monitor.scala:728:35] reg d_first_counter_2; // @[Edges.scala:229:27] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
Generate the Verilog code corresponding to the following Chisel files. File UnsafeAXI4ToTL.scala: package ara import chisel3._ import chisel3.util._ import freechips.rocketchip.amba._ import freechips.rocketchip.amba.axi4._ import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.util._ class ReorderData(val dataWidth: Int, val respWidth: Int, val userFields: Seq[BundleFieldBase]) extends Bundle { val data = UInt(dataWidth.W) val resp = UInt(respWidth.W) val last = Bool() val user = BundleMap(userFields) } /** Parameters for [[BaseReservableListBuffer]] and all child classes. * * @param numEntries Total number of elements that can be stored in the 'data' RAM * @param numLists Maximum number of linked lists * @param numBeats Maximum number of beats per entry */ case class ReservableListBufferParameters(numEntries: Int, numLists: Int, numBeats: Int) { // Avoid zero-width wires when we call 'log2Ceil' val entryBits = if (numEntries == 1) 1 else log2Ceil(numEntries) val listBits = if (numLists == 1) 1 else log2Ceil(numLists) val beatBits = if (numBeats == 1) 1 else log2Ceil(numBeats) } case class UnsafeAXI4ToTLNode(numTlTxns: Int, wcorrupt: Boolean)(implicit valName: ValName) extends MixedAdapterNode(AXI4Imp, TLImp)( dFn = { case mp => TLMasterPortParameters.v2( masters = mp.masters.zipWithIndex.map { case (m, i) => // Support 'numTlTxns' read requests and 'numTlTxns' write requests at once. val numSourceIds = numTlTxns * 2 TLMasterParameters.v2( name = m.name, sourceId = IdRange(i * numSourceIds, (i + 1) * numSourceIds), nodePath = m.nodePath ) }, echoFields = mp.echoFields, requestFields = AMBAProtField() +: mp.requestFields, responseKeys = mp.responseKeys ) }, uFn = { mp => AXI4SlavePortParameters( slaves = mp.managers.map { m => val maxXfer = TransferSizes(1, mp.beatBytes * (1 << AXI4Parameters.lenBits)) AXI4SlaveParameters( address = m.address, resources = m.resources, regionType = m.regionType, executable = m.executable, nodePath = m.nodePath, supportsWrite = m.supportsPutPartial.intersect(maxXfer), supportsRead = m.supportsGet.intersect(maxXfer), interleavedId = Some(0) // TL2 never interleaves D beats ) }, beatBytes = mp.beatBytes, minLatency = mp.minLatency, responseFields = mp.responseFields, requestKeys = (if (wcorrupt) Seq(AMBACorrupt) else Seq()) ++ mp.requestKeys.filter(_ != AMBAProt) ) } ) class UnsafeAXI4ToTL(numTlTxns: Int, wcorrupt: Boolean)(implicit p: Parameters) extends LazyModule { require(numTlTxns >= 1) require(isPow2(numTlTxns), s"Number of TileLink transactions ($numTlTxns) must be a power of 2") val node = UnsafeAXI4ToTLNode(numTlTxns, wcorrupt) lazy val module = new LazyModuleImp(this) { (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => edgeIn.master.masters.foreach { m => require(m.aligned, "AXI4ToTL requires aligned requests") } val numIds = edgeIn.master.endId val beatBytes = edgeOut.slave.beatBytes val maxTransfer = edgeOut.slave.maxTransfer val maxBeats = maxTransfer / beatBytes // Look for an Error device to redirect bad requests val errorDevs = edgeOut.slave.managers.filter(_.nodePath.last.lazyModule.className == "TLError") require(!errorDevs.isEmpty, "There is no TLError reachable from AXI4ToTL. 
One must be instantiated.") val errorDev = errorDevs.maxBy(_.maxTransfer) val errorDevAddr = errorDev.address.head.base require( errorDev.supportsPutPartial.contains(maxTransfer), s"Error device supports ${errorDev.supportsPutPartial} PutPartial but must support $maxTransfer" ) require( errorDev.supportsGet.contains(maxTransfer), s"Error device supports ${errorDev.supportsGet} Get but must support $maxTransfer" ) // All of the read-response reordering logic. val listBufData = new ReorderData(beatBytes * 8, edgeIn.bundle.respBits, out.d.bits.user.fields) val listBufParams = ReservableListBufferParameters(numTlTxns, numIds, maxBeats) val listBuffer = if (numTlTxns > 1) { Module(new ReservableListBuffer(listBufData, listBufParams)) } else { Module(new PassthroughListBuffer(listBufData, listBufParams)) } // To differentiate between read and write transaction IDs, we will set the MSB of the TileLink 'source' field to // 0 for read requests and 1 for write requests. val isReadSourceBit = 0.U(1.W) val isWriteSourceBit = 1.U(1.W) /* Read request logic */ val rOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle))) val rBytes1 = in.ar.bits.bytes1() val rSize = OH1ToUInt(rBytes1) val rOk = edgeOut.slave.supportsGetSafe(in.ar.bits.addr, rSize) val rId = if (numTlTxns > 1) { Cat(isReadSourceBit, listBuffer.ioReservedIndex) } else { isReadSourceBit } val rAddr = Mux(rOk, in.ar.bits.addr, errorDevAddr.U | in.ar.bits.addr(log2Ceil(beatBytes) - 1, 0)) // Indicates if there are still valid TileLink source IDs left to use. val canIssueR = listBuffer.ioReserve.ready listBuffer.ioReserve.bits := in.ar.bits.id listBuffer.ioReserve.valid := in.ar.valid && rOut.ready in.ar.ready := rOut.ready && canIssueR rOut.valid := in.ar.valid && canIssueR rOut.bits :<= edgeOut.Get(rId, rAddr, rSize)._2 rOut.bits.user :<= in.ar.bits.user rOut.bits.user.lift(AMBAProt).foreach { rProt => rProt.privileged := in.ar.bits.prot(0) rProt.secure := !in.ar.bits.prot(1) rProt.fetch := in.ar.bits.prot(2) rProt.bufferable := in.ar.bits.cache(0) rProt.modifiable := in.ar.bits.cache(1) rProt.readalloc := in.ar.bits.cache(2) rProt.writealloc := in.ar.bits.cache(3) } /* Write request logic */ // Strip off the MSB, which identifies the transaction as read vs write. val strippedResponseSourceId = if (numTlTxns > 1) { out.d.bits.source((out.d.bits.source).getWidth - 2, 0) } else { // When there's only 1 TileLink transaction allowed for read/write, then this field is always 0. 0.U(1.W) } // Track when a write request burst is in progress. val writeBurstBusy = RegInit(false.B) when(in.w.fire) { writeBurstBusy := !in.w.bits.last } val usedWriteIds = RegInit(0.U(numTlTxns.W)) val canIssueW = !usedWriteIds.andR val usedWriteIdsSet = WireDefault(0.U(numTlTxns.W)) val usedWriteIdsClr = WireDefault(0.U(numTlTxns.W)) usedWriteIds := (usedWriteIds & ~usedWriteIdsClr) | usedWriteIdsSet // Since write responses can show up in the middle of a write burst, we need to ensure the write burst ID doesn't // change mid-burst. 
val freeWriteIdOHRaw = Wire(UInt(numTlTxns.W)) val freeWriteIdOH = freeWriteIdOHRaw holdUnless !writeBurstBusy val freeWriteIdIndex = OHToUInt(freeWriteIdOH) freeWriteIdOHRaw := ~(leftOR(~usedWriteIds) << 1) & ~usedWriteIds val wOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle))) val wBytes1 = in.aw.bits.bytes1() val wSize = OH1ToUInt(wBytes1) val wOk = edgeOut.slave.supportsPutPartialSafe(in.aw.bits.addr, wSize) val wId = if (numTlTxns > 1) { Cat(isWriteSourceBit, freeWriteIdIndex) } else { isWriteSourceBit } val wAddr = Mux(wOk, in.aw.bits.addr, errorDevAddr.U | in.aw.bits.addr(log2Ceil(beatBytes) - 1, 0)) // Here, we're taking advantage of the Irrevocable behavior of AXI4 (once 'valid' is asserted it must remain // asserted until the handshake occurs). We will only accept W-channel beats when we have a valid AW beat, but // the AW-channel beat won't fire until the final W-channel beat fires. So, we have stable address/size/strb // bits during a W-channel burst. in.aw.ready := wOut.ready && in.w.valid && in.w.bits.last && canIssueW in.w.ready := wOut.ready && in.aw.valid && canIssueW wOut.valid := in.aw.valid && in.w.valid && canIssueW wOut.bits :<= edgeOut.Put(wId, wAddr, wSize, in.w.bits.data, in.w.bits.strb)._2 in.w.bits.user.lift(AMBACorrupt).foreach { wOut.bits.corrupt := _ } wOut.bits.user :<= in.aw.bits.user wOut.bits.user.lift(AMBAProt).foreach { wProt => wProt.privileged := in.aw.bits.prot(0) wProt.secure := !in.aw.bits.prot(1) wProt.fetch := in.aw.bits.prot(2) wProt.bufferable := in.aw.bits.cache(0) wProt.modifiable := in.aw.bits.cache(1) wProt.readalloc := in.aw.bits.cache(2) wProt.writealloc := in.aw.bits.cache(3) } // Merge the AXI4 read/write requests into the TL-A channel. TLArbiter(TLArbiter.roundRobin)(out.a, (0.U, rOut), (in.aw.bits.len, wOut)) /* Read/write response logic */ val okB = Wire(Irrevocable(new AXI4BundleB(edgeIn.bundle))) val okR = Wire(Irrevocable(new AXI4BundleR(edgeIn.bundle))) val dResp = Mux(out.d.bits.denied || out.d.bits.corrupt, AXI4Parameters.RESP_SLVERR, AXI4Parameters.RESP_OKAY) val dHasData = edgeOut.hasData(out.d.bits) val (_dFirst, dLast, _dDone, dCount) = edgeOut.count(out.d) val dNumBeats1 = edgeOut.numBeats1(out.d.bits) // Handle cases where writeack arrives before write is done val writeEarlyAck = (UIntToOH(strippedResponseSourceId) & usedWriteIds) === 0.U out.d.ready := Mux(dHasData, listBuffer.ioResponse.ready, okB.ready && !writeEarlyAck) listBuffer.ioDataOut.ready := okR.ready okR.valid := listBuffer.ioDataOut.valid okB.valid := out.d.valid && !dHasData && !writeEarlyAck listBuffer.ioResponse.valid := out.d.valid && dHasData listBuffer.ioResponse.bits.index := strippedResponseSourceId listBuffer.ioResponse.bits.data.data := out.d.bits.data listBuffer.ioResponse.bits.data.resp := dResp listBuffer.ioResponse.bits.data.last := dLast listBuffer.ioResponse.bits.data.user :<= out.d.bits.user listBuffer.ioResponse.bits.count := dCount listBuffer.ioResponse.bits.numBeats1 := dNumBeats1 okR.bits.id := listBuffer.ioDataOut.bits.listIndex okR.bits.data := listBuffer.ioDataOut.bits.payload.data okR.bits.resp := listBuffer.ioDataOut.bits.payload.resp okR.bits.last := listBuffer.ioDataOut.bits.payload.last okR.bits.user :<= listBuffer.ioDataOut.bits.payload.user // Upon the final beat in a write request, record a mapping from TileLink source ID to AXI write ID. Upon a write // response, mark the write transaction as complete. 
val writeIdMap = Mem(numTlTxns, UInt(log2Ceil(numIds).W)) val writeResponseId = writeIdMap.read(strippedResponseSourceId) when(wOut.fire) { writeIdMap.write(freeWriteIdIndex, in.aw.bits.id) } when(edgeOut.done(wOut)) { usedWriteIdsSet := freeWriteIdOH } when(okB.fire) { usedWriteIdsClr := UIntToOH(strippedResponseSourceId, numTlTxns) } okB.bits.id := writeResponseId okB.bits.resp := dResp okB.bits.user :<= out.d.bits.user // AXI4 needs irrevocable behaviour in.r <> Queue.irrevocable(okR, 1, flow = true) in.b <> Queue.irrevocable(okB, 1, flow = true) // Unused channels out.b.ready := true.B out.c.valid := false.B out.e.valid := false.B /* Alignment constraints. The AXI4Fragmenter should guarantee all of these constraints. */ def checkRequest[T <: AXI4BundleA](a: IrrevocableIO[T], reqType: String): Unit = { val lReqType = reqType.toLowerCase when(a.valid) { assert(a.bits.len < maxBeats.U, s"$reqType burst length (%d) must be less than $maxBeats", a.bits.len + 1.U) // Narrow transfers and FIXED bursts must be single-beat bursts. when(a.bits.len =/= 0.U) { assert( a.bits.size === log2Ceil(beatBytes).U, s"Narrow $lReqType transfers (%d < $beatBytes bytes) can't be multi-beat bursts (%d beats)", 1.U << a.bits.size, a.bits.len + 1.U ) assert( a.bits.burst =/= AXI4Parameters.BURST_FIXED, s"Fixed $lReqType bursts can't be multi-beat bursts (%d beats)", a.bits.len + 1.U ) } // Furthermore, the transfer size (a.bits.bytes1() + 1.U) must be naturally-aligned to the address (in // particular, during both WRAP and INCR bursts), but this constraint is already checked by TileLink // Monitors. Note that this alignment requirement means that WRAP bursts are identical to INCR bursts. } } checkRequest(in.ar, "Read") checkRequest(in.aw, "Write") } } } object UnsafeAXI4ToTL { def apply(numTlTxns: Int = 1, wcorrupt: Boolean = true)(implicit p: Parameters) = { val axi42tl = LazyModule(new UnsafeAXI4ToTL(numTlTxns, wcorrupt)) axi42tl.node } } /* ReservableListBuffer logic, and associated classes. */ class ResponsePayload[T <: Data](val data: T, val params: ReservableListBufferParameters) extends Bundle { val index = UInt(params.entryBits.W) val count = UInt(params.beatBits.W) val numBeats1 = UInt(params.beatBits.W) } class DataOutPayload[T <: Data](val payload: T, val params: ReservableListBufferParameters) extends Bundle { val listIndex = UInt(params.listBits.W) } /** Abstract base class to unify [[ReservableListBuffer]] and [[PassthroughListBuffer]]. */ abstract class BaseReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends Module { require(params.numEntries > 0) require(params.numLists > 0) val ioReserve = IO(Flipped(Decoupled(UInt(params.listBits.W)))) val ioReservedIndex = IO(Output(UInt(params.entryBits.W))) val ioResponse = IO(Flipped(Decoupled(new ResponsePayload(gen, params)))) val ioDataOut = IO(Decoupled(new DataOutPayload(gen, params))) } /** A modified version of 'ListBuffer' from 'sifive/block-inclusivecache-sifive'. This module forces users to reserve * linked list entries (through the 'ioReserve' port) before writing data into those linked lists (through the * 'ioResponse' port). Each response is tagged to indicate which linked list it is written into. The responses for a * given linked list can come back out-of-order, but they will be read out through the 'ioDataOut' port in-order. 
* * ==Constructor== * @param gen Chisel type of linked list data element * @param params Other parameters * * ==Module IO== * @param ioReserve Index of list to reserve a new element in * @param ioReservedIndex Index of the entry that was reserved in the linked list, valid when 'ioReserve.fire' * @param ioResponse Payload containing response data and linked-list-entry index * @param ioDataOut Payload containing data read from response linked list and linked list index */ class ReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends BaseReservableListBuffer(gen, params) { val valid = RegInit(0.U(params.numLists.W)) val head = Mem(params.numLists, UInt(params.entryBits.W)) val tail = Mem(params.numLists, UInt(params.entryBits.W)) val used = RegInit(0.U(params.numEntries.W)) val next = Mem(params.numEntries, UInt(params.entryBits.W)) val map = Mem(params.numEntries, UInt(params.listBits.W)) val dataMems = Seq.fill(params.numBeats) { SyncReadMem(params.numEntries, gen) } val dataIsPresent = RegInit(0.U(params.numEntries.W)) val beats = Mem(params.numEntries, UInt(params.beatBits.W)) // The 'data' SRAM should be single-ported (read-or-write), since dual-ported SRAMs are significantly slower. val dataMemReadEnable = WireDefault(false.B) val dataMemWriteEnable = WireDefault(false.B) assert(!(dataMemReadEnable && dataMemWriteEnable)) // 'freeOH' has a single bit set, which is the least-significant bit that is cleared in 'used'. So, it's the // lowest-index entry in the 'data' RAM which is free. val freeOH = Wire(UInt(params.numEntries.W)) val freeIndex = OHToUInt(freeOH) freeOH := ~(leftOR(~used) << 1) & ~used ioReservedIndex := freeIndex val validSet = WireDefault(0.U(params.numLists.W)) val validClr = WireDefault(0.U(params.numLists.W)) val usedSet = WireDefault(0.U(params.numEntries.W)) val usedClr = WireDefault(0.U(params.numEntries.W)) val dataIsPresentSet = WireDefault(0.U(params.numEntries.W)) val dataIsPresentClr = WireDefault(0.U(params.numEntries.W)) valid := (valid & ~validClr) | validSet used := (used & ~usedClr) | usedSet dataIsPresent := (dataIsPresent & ~dataIsPresentClr) | dataIsPresentSet /* Reservation logic signals */ val reserveTail = Wire(UInt(params.entryBits.W)) val reserveIsValid = Wire(Bool()) /* Response logic signals */ val responseIndex = Wire(UInt(params.entryBits.W)) val responseListIndex = Wire(UInt(params.listBits.W)) val responseHead = Wire(UInt(params.entryBits.W)) val responseTail = Wire(UInt(params.entryBits.W)) val nextResponseHead = Wire(UInt(params.entryBits.W)) val nextDataIsPresent = Wire(Bool()) val isResponseInOrder = Wire(Bool()) val isEndOfList = Wire(Bool()) val isLastBeat = Wire(Bool()) val isLastResponseBeat = Wire(Bool()) val isLastUnwindBeat = Wire(Bool()) /* Reservation logic */ reserveTail := tail.read(ioReserve.bits) reserveIsValid := valid(ioReserve.bits) ioReserve.ready := !used.andR // When we want to append-to and destroy the same linked list on the same cycle, we need to take special care that we // actually start a new list, rather than appending to a list that's about to disappear. 
val reserveResponseSameList = ioReserve.bits === responseListIndex val appendToAndDestroyList = ioReserve.fire && ioDataOut.fire && reserveResponseSameList && isEndOfList && isLastBeat when(ioReserve.fire) { validSet := UIntToOH(ioReserve.bits, params.numLists) usedSet := freeOH when(reserveIsValid && !appendToAndDestroyList) { next.write(reserveTail, freeIndex) }.otherwise { head.write(ioReserve.bits, freeIndex) } tail.write(ioReserve.bits, freeIndex) map.write(freeIndex, ioReserve.bits) } /* Response logic */ // The majority of the response logic (reading from and writing to the various RAMs) is common between the // response-from-IO case (ioResponse.fire) and the response-from-unwind case (unwindDataIsValid). // The read from the 'next' RAM should be performed at the address given by 'responseHead'. However, we only use the // 'nextResponseHead' signal when 'isResponseInOrder' is asserted (both in the response-from-IO and // response-from-unwind cases), which implies that 'responseHead' equals 'responseIndex'. 'responseHead' comes after // two back-to-back RAM reads, so indexing into the 'next' RAM with 'responseIndex' is much quicker. responseHead := head.read(responseListIndex) responseTail := tail.read(responseListIndex) nextResponseHead := next.read(responseIndex) nextDataIsPresent := dataIsPresent(nextResponseHead) // Note that when 'isEndOfList' is asserted, 'nextResponseHead' (and therefore 'nextDataIsPresent') is invalid, since // there isn't a next element in the linked list. isResponseInOrder := responseHead === responseIndex isEndOfList := responseHead === responseTail isLastResponseBeat := ioResponse.bits.count === ioResponse.bits.numBeats1 // When a response's last beat is sent to the output channel, mark it as completed. This can happen in two // situations: // 1. We receive an in-order response, which travels straight from 'ioResponse' to 'ioDataOut'. The 'data' SRAM // reservation was never needed. // 2. An entry is read out of the 'data' SRAM (within the unwind FSM). when(ioDataOut.fire && isLastBeat) { // Mark the reservation as no-longer-used. usedClr := UIntToOH(responseIndex, params.numEntries) // If the response is in-order, then we're popping an element from this linked list. when(isEndOfList) { // Once we pop the last element from a linked list, mark it as no-longer-present. validClr := UIntToOH(responseListIndex, params.numLists) }.otherwise { // Move the linked list's head pointer to the new head pointer. head.write(responseListIndex, nextResponseHead) } } // If we get an out-of-order response, then stash it in the 'data' SRAM for later unwinding. when(ioResponse.fire && !isResponseInOrder) { dataMemWriteEnable := true.B when(isLastResponseBeat) { dataIsPresentSet := UIntToOH(ioResponse.bits.index, params.numEntries) beats.write(ioResponse.bits.index, ioResponse.bits.numBeats1) } } // Use the 'ioResponse.bits.count' index (AKA the beat number) to select which 'data' SRAM to write to. 
val responseCountOH = UIntToOH(ioResponse.bits.count, params.numBeats) (responseCountOH.asBools zip dataMems) foreach { case (select, seqMem) => when(select && dataMemWriteEnable) { seqMem.write(ioResponse.bits.index, ioResponse.bits.data) } } /* Response unwind logic */ // Unwind FSM state definitions val sIdle :: sUnwinding :: Nil = Enum(2) val unwindState = RegInit(sIdle) val busyUnwinding = unwindState === sUnwinding val startUnwind = Wire(Bool()) val stopUnwind = Wire(Bool()) when(startUnwind) { unwindState := sUnwinding }.elsewhen(stopUnwind) { unwindState := sIdle } assert(!(startUnwind && stopUnwind)) // Start the unwind FSM when there is an old out-of-order response stored in the 'data' SRAM that is now about to // become the next in-order response. As noted previously, when 'isEndOfList' is asserted, 'nextDataIsPresent' is // invalid. // // Note that since an in-order response from 'ioResponse' to 'ioDataOut' starts the unwind FSM, we don't have to // worry about overwriting the 'data' SRAM's output when we start the unwind FSM. startUnwind := ioResponse.fire && isResponseInOrder && isLastResponseBeat && !isEndOfList && nextDataIsPresent // Stop the unwind FSM when the output channel consumes the final beat of an element from the unwind FSM, and one of // two things happens: // 1. We're still waiting for the next in-order response for this list (!nextDataIsPresent) // 2. There are no more outstanding responses in this list (isEndOfList) // // Including 'busyUnwinding' ensures this is a single-cycle pulse, and it never fires while in-order transactions are // passing from 'ioResponse' to 'ioDataOut'. stopUnwind := busyUnwinding && ioDataOut.fire && isLastUnwindBeat && (!nextDataIsPresent || isEndOfList) val isUnwindBurstOver = Wire(Bool()) val startNewBurst = startUnwind || (isUnwindBurstOver && dataMemReadEnable) // Track the number of beats left to unwind for each list entry. At the start of a new burst, we flop the number of // beats in this burst (minus 1) into 'unwindBeats1', and we reset the 'beatCounter' counter. With each beat, we // increment 'beatCounter' until it reaches 'unwindBeats1'. val unwindBeats1 = Reg(UInt(params.beatBits.W)) val nextBeatCounter = Wire(UInt(params.beatBits.W)) val beatCounter = RegNext(nextBeatCounter) isUnwindBurstOver := beatCounter === unwindBeats1 when(startNewBurst) { unwindBeats1 := beats.read(nextResponseHead) nextBeatCounter := 0.U }.elsewhen(dataMemReadEnable) { nextBeatCounter := beatCounter + 1.U }.otherwise { nextBeatCounter := beatCounter } // When unwinding, feed the next linked-list head pointer (read out of the 'next' RAM) back so we can unwind the next // entry in this linked list. Only update the pointer when we're actually moving to the next 'data' SRAM entry (which // happens at the start of reading a new stored burst). val unwindResponseIndex = RegEnable(nextResponseHead, startNewBurst) responseIndex := Mux(busyUnwinding, unwindResponseIndex, ioResponse.bits.index) // Hold 'nextResponseHead' static while we're in the middle of unwinding a multi-beat burst entry. We don't want the // SRAM read address to shift while reading beats from a burst. Note that this is identical to 'nextResponseHead // holdUnless startNewBurst', but 'unwindResponseIndex' already implements the 'RegEnable' signal in 'holdUnless'. val unwindReadAddress = Mux(startNewBurst, nextResponseHead, unwindResponseIndex) // The 'data' SRAM's output is valid if we read from the SRAM on the previous cycle. 
The SRAM's output stays valid // until it is consumed by the output channel (and if we don't read from the SRAM again on that same cycle). val unwindDataIsValid = RegInit(false.B) when(dataMemReadEnable) { unwindDataIsValid := true.B }.elsewhen(ioDataOut.fire) { unwindDataIsValid := false.B } isLastUnwindBeat := isUnwindBurstOver && unwindDataIsValid // Indicates if this is the last beat for both 'ioResponse'-to-'ioDataOut' and unwind-to-'ioDataOut' beats. isLastBeat := Mux(busyUnwinding, isLastUnwindBeat, isLastResponseBeat) // Select which SRAM to read from based on the beat counter. val dataOutputVec = Wire(Vec(params.numBeats, gen)) val nextBeatCounterOH = UIntToOH(nextBeatCounter, params.numBeats) (nextBeatCounterOH.asBools zip dataMems).zipWithIndex foreach { case ((select, seqMem), i) => dataOutputVec(i) := seqMem.read(unwindReadAddress, select && dataMemReadEnable) } // Select the current 'data' SRAM output beat, and save the output in a register in case we're being back-pressured // by 'ioDataOut'. This implements the functionality of 'readAndHold', but only on the single SRAM we're reading // from. val dataOutput = dataOutputVec(beatCounter) holdUnless RegNext(dataMemReadEnable) // Mark 'data' burst entries as no-longer-present as they get read out of the SRAM. when(dataMemReadEnable) { dataIsPresentClr := UIntToOH(unwindReadAddress, params.numEntries) } // As noted above, when starting the unwind FSM, we know the 'data' SRAM's output isn't valid, so it's safe to issue // a read command. Otherwise, only issue an SRAM read when the next 'unwindState' is 'sUnwinding', and if we know // we're not going to overwrite the SRAM's current output (the SRAM output is already valid, and it's not going to be // consumed by the output channel). val dontReadFromDataMem = unwindDataIsValid && !ioDataOut.ready dataMemReadEnable := startUnwind || (busyUnwinding && !stopUnwind && !dontReadFromDataMem) // While unwinding, prevent new reservations from overwriting the current 'map' entry that we're using. We need // 'responseListIndex' to be coherent for the entire unwind process. val rawResponseListIndex = map.read(responseIndex) val unwindResponseListIndex = RegEnable(rawResponseListIndex, startNewBurst) responseListIndex := Mux(busyUnwinding, unwindResponseListIndex, rawResponseListIndex) // Accept responses either when they can be passed through to the output channel, or if they're out-of-order and are // just going to be stashed in the 'data' SRAM. Never accept a response payload when we're busy unwinding, since that // could result in reading from and writing to the 'data' SRAM in the same cycle, and we want that SRAM to be // single-ported. ioResponse.ready := (ioDataOut.ready || !isResponseInOrder) && !busyUnwinding // Either pass an in-order response to the output channel, or data read from the unwind FSM. ioDataOut.valid := Mux(busyUnwinding, unwindDataIsValid, ioResponse.valid && isResponseInOrder) ioDataOut.bits.listIndex := responseListIndex ioDataOut.bits.payload := Mux(busyUnwinding, dataOutput, ioResponse.bits.data) // It's an error to get a response that isn't associated with a valid linked list. when(ioResponse.fire || unwindDataIsValid) { assert( valid(responseListIndex), "No linked list exists at index %d, mapped from %d", responseListIndex, responseIndex ) } when(busyUnwinding && dataMemReadEnable) { assert(isResponseInOrder, "Unwind FSM must read entries from SRAM in order") } } /** Specialized version of [[ReservableListBuffer]] for the case of numEntries == 1. 
* * Much of the complex logic in [[ReservableListBuffer]] can disappear in this case. For instance, we don't have to * reorder any responses, or store any linked lists. */ class PassthroughListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends BaseReservableListBuffer(gen, params) { require(params.numEntries == 1, s"PassthroughListBuffer is only valid when 'numEntries' (${params.numEntries}) is 1") val used = RegInit(0.U(params.numEntries.W)) val map = Mem(params.numEntries, UInt(params.listBits.W)) val usedSet = WireDefault(0.U(params.numEntries.W)) val usedClr = WireDefault(0.U(params.numEntries.W)) used := (used & ~usedClr) | usedSet ioReserve.ready := used === 0.U // Store which list index was reserved, we need to return this value when we get a response. when(ioReserve.fire) { usedSet := 1.U map.write(0.U, ioReserve.bits) } // There's only one valid linked list entry, which is at index 0. ioReservedIndex := 0.U val isLastResponseBeat = ioResponse.bits.count === ioResponse.bits.numBeats1 // Mark the linked list as empty when we get the last beat in a response. // Note that 'ioResponse.fire === ioDataOut.fire'. when(ioResponse.fire && isLastResponseBeat) { usedClr := 1.U } // Always pass the response data straight through, since we never need to reorder the response data. ioDataOut.bits.listIndex := map.read(0.U) ioDataOut.bits.payload := ioResponse.bits.data ioDataOut.valid := ioResponse.valid ioResponse.ready := ioDataOut.ready }
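A note on the free-slot selection used by both 'freeOH' and 'freeWriteIdOHRaw' above: the expression ~(leftOR(~used) << 1) & ~used isolates the least-significant cleared bit of 'used'. Below is a plain-Scala sanity check of the same bit trick with illustrative values; leftOR here is a local stand-in for freechips.rocketchip.util.leftOR, in which each result bit is the OR of that bit and all less-significant bits:

object FreeSlotDemo extends App {
  // Repeated shift-by-one OR propagation, masked back to 'w' bits.
  def leftOR(x: Int, w: Int): Int = (0 until w).foldLeft(x)((y, _) => (y | (y << 1)) & ((1 << w) - 1))
  val w = 4
  val used = Integer.parseInt("0110", 2)              // entries 1 and 2 busy
  val free = ~used & ((1 << w) - 1)                   // b1001
  val freeOH = ~(leftOR(free, w) << 1) & free & ((1 << w) - 1)
  println(freeOH.toBinaryString)                      // prints 1: entry 0 is the lowest free slot
}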
module dataMems_442( // @[UnsafeAXI4ToTL.scala:365:62] input [4:0] R0_addr, input R0_en, input R0_clk, output [66:0] R0_data, input [4:0] W0_addr, input W0_en, input W0_clk, input [66:0] W0_data ); dataMems_0_ext dataMems_0_ext ( // @[UnsafeAXI4ToTL.scala:365:62] .R0_addr (R0_addr), .R0_en (R0_en), .R0_clk (R0_clk), .R0_data (R0_data), .W0_addr (W0_addr), .W0_en (W0_en), .W0_clk (W0_clk), .W0_data (W0_data) ); // @[UnsafeAXI4ToTL.scala:365:62] endmodule
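Each dataMems_N module like the one above is the synthesized view of one of the SyncReadMem instances created by Seq.fill(params.numBeats) { SyncReadMem(params.numEntries, gen) } in ReservableListBuffer; here 32 entries of 67 bits, matching the 5-bit address and 67-bit data ports. A minimal sketch of the read-or-write discipline that buffer enforces on its memories; the wrapper module and port names are illustrative:

import chisel3._

class OnePortMemSketch extends Module {
  val io = IO(new Bundle {
    val rdEn   = Input(Bool())
    val wrEn   = Input(Bool())
    val addr   = Input(UInt(5.W))
    val wrData = Input(UInt(67.W))
    val rdData = Output(UInt(67.W))
  })
  val mem = SyncReadMem(32, UInt(67.W))
  // At most one of read/write per cycle, mirroring assert(!(dataMemReadEnable && dataMemWriteEnable)).
  io.rdData := mem.read(io.addr, io.rdEn && !io.wrEn) // read data becomes available one cycle later
  when(io.wrEn) { mem.write(io.addr, io.wrData) }
}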
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
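A minimal usage sketch of the companion-object helpers above; the signals asyncStatus, wordIn, and capEn are illustrative:

// 4-bit status word entering this clock domain: 3 synchronizer flops, asynchronously reset to 0.
val statusSync = AsyncResetSynchronizerShiftReg(asyncStatus, 3, 0, Some("status_sync"))
// Single-deep clock-crossing capture register with an enable and a zero init value.
val wordCapture = ClockCrossingReg(wordIn, capEn, doInit = true, Some("word_capture"))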
module AsyncResetSynchronizerShiftReg_w4_d3_i0_13( // @[SynchronizerReg.scala:80:7] input clock, // @[SynchronizerReg.scala:80:7] input reset, // @[SynchronizerReg.scala:80:7] input [3:0] io_d, // @[ShiftReg.scala:36:14] output [3:0] io_q // @[ShiftReg.scala:36:14] ); wire [3:0] io_d_0 = io_d; // @[SynchronizerReg.scala:80:7] wire _output_T = reset; // @[SynchronizerReg.scala:86:21] wire _output_T_2 = reset; // @[SynchronizerReg.scala:86:21] wire _output_T_4 = reset; // @[SynchronizerReg.scala:86:21] wire _output_T_6 = reset; // @[SynchronizerReg.scala:86:21] wire [3:0] _io_q_T; // @[SynchronizerReg.scala:90:14] wire [3:0] io_q_0; // @[SynchronizerReg.scala:80:7] wire _output_T_1 = io_d_0[0]; // @[SynchronizerReg.scala:80:7, :87:41] wire output_0; // @[ShiftReg.scala:48:24] wire _output_T_3 = io_d_0[1]; // @[SynchronizerReg.scala:80:7, :87:41] wire output_1; // @[ShiftReg.scala:48:24] wire _output_T_5 = io_d_0[2]; // @[SynchronizerReg.scala:80:7, :87:41] wire output_2; // @[ShiftReg.scala:48:24] wire _output_T_7 = io_d_0[3]; // @[SynchronizerReg.scala:80:7, :87:41] wire output_3; // @[ShiftReg.scala:48:24] wire [1:0] io_q_lo = {output_1, output_0}; // @[SynchronizerReg.scala:90:14] wire [1:0] io_q_hi = {output_3, output_2}; // @[SynchronizerReg.scala:90:14] assign _io_q_T = {io_q_hi, io_q_lo}; // @[SynchronizerReg.scala:90:14] assign io_q_0 = _io_q_T; // @[SynchronizerReg.scala:80:7, :90:14] AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_138 output_chain ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (_output_T), // @[SynchronizerReg.scala:86:21] .io_d (_output_T_1), // @[SynchronizerReg.scala:87:41] .io_q (output_0) ); // @[ShiftReg.scala:45:23] AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_139 output_chain_1 ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (_output_T_2), // @[SynchronizerReg.scala:86:21] .io_d (_output_T_3), // @[SynchronizerReg.scala:87:41] .io_q (output_1) ); // @[ShiftReg.scala:45:23] AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_140 output_chain_2 ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (_output_T_4), // @[SynchronizerReg.scala:86:21] .io_d (_output_T_5), // @[SynchronizerReg.scala:87:41] .io_q (output_2) ); // @[ShiftReg.scala:45:23] AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_141 output_chain_3 ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (_output_T_6), // @[SynchronizerReg.scala:86:21] .io_d (_output_T_7), // @[SynchronizerReg.scala:87:41] .io_q (output_3) ); // @[ShiftReg.scala:45:23] assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7] endmodule
Generate the Verilog code corresponding to the following Chisel files. File ShiftRegisterPriorityQueue.scala: package compressacc import chisel3._ import chisel3.util._ import chisel3.util._ // TODO : support enq & deq at the same cycle class PriorityQueueStageIO(keyWidth: Int, value: ValueInfo) extends Bundle { val output_prev = KeyValue(keyWidth, value) val output_nxt = KeyValue(keyWidth, value) val input_prev = Flipped(KeyValue(keyWidth, value)) val input_nxt = Flipped(KeyValue(keyWidth, value)) val cmd = Flipped(Valid(UInt(1.W))) val insert_here = Input(Bool()) val cur_input_keyval = Flipped(KeyValue(keyWidth, value)) val cur_output_keyval = KeyValue(keyWidth, value) } class PriorityQueueStage(keyWidth: Int, value: ValueInfo) extends Module { val io = IO(new PriorityQueueStageIO(keyWidth, value)) dontTouch(io) val CMD_DEQ = 0.U val CMD_ENQ = 1.U val MAX_VALUE = (1 << keyWidth) - 1 val key_reg = RegInit(MAX_VALUE.U(keyWidth.W)) val value_reg = Reg(value) io.output_prev.key := key_reg io.output_prev.value := value_reg io.output_nxt.key := key_reg io.output_nxt.value := value_reg io.cur_output_keyval.key := key_reg io.cur_output_keyval.value := value_reg when (io.cmd.valid) { switch (io.cmd.bits) { is (CMD_DEQ) { key_reg := io.input_nxt.key value_reg := io.input_nxt.value } is (CMD_ENQ) { when (io.insert_here) { key_reg := io.cur_input_keyval.key value_reg := io.cur_input_keyval.value } .elsewhen (key_reg >= io.cur_input_keyval.key) { key_reg := io.input_prev.key value_reg := io.input_prev.value } .otherwise { // do nothing } } } } } object PriorityQueueStage { def apply(keyWidth: Int, v: ValueInfo): PriorityQueueStage = new PriorityQueueStage(keyWidth, v) } // TODO // - This design is not scalable as the enqued_keyval is broadcasted to all the stages // - Add pipeline registers later class PriorityQueueIO(queSize: Int, keyWidth: Int, value: ValueInfo) extends Bundle { val cnt_bits = log2Ceil(queSize+1) val counter = Output(UInt(cnt_bits.W)) val enq = Flipped(Decoupled(KeyValue(keyWidth, value))) val deq = Decoupled(KeyValue(keyWidth, value)) } class PriorityQueue(queSize: Int, keyWidth: Int, value: ValueInfo) extends Module { val keyWidthInternal = keyWidth + 1 val CMD_DEQ = 0.U val CMD_ENQ = 1.U val io = IO(new PriorityQueueIO(queSize, keyWidthInternal, value)) dontTouch(io) val MAX_VALUE = ((1 << keyWidthInternal) - 1).U val cnt_bits = log2Ceil(queSize+1) // do not consider cases where we are inserting more entries then the queSize val counter = RegInit(0.U(cnt_bits.W)) io.counter := counter val full = (counter === queSize.U) val empty = (counter === 0.U) io.deq.valid := !empty io.enq.ready := !full when (io.enq.fire) { counter := counter + 1.U } when (io.deq.fire) { counter := counter - 1.U } val cmd_valid = io.enq.valid || io.deq.ready val cmd = Mux(io.enq.valid, CMD_ENQ, CMD_DEQ) assert(!(io.enq.valid && io.deq.ready)) val stages = Seq.fill(queSize)(Module(new PriorityQueueStage(keyWidthInternal, value))) for (i <- 0 until (queSize - 1)) { stages(i+1).io.input_prev <> stages(i).io.output_nxt stages(i).io.input_nxt <> stages(i+1).io.output_prev } stages(queSize-1).io.input_nxt.key := MAX_VALUE // stages(queSize-1).io.input_nxt.value := stages(queSize-1).io.input_nxt.value.symbol := 0.U // stages(queSize-1).io.input_nxt.value.child(0) := 0.U // stages(queSize-1).io.input_nxt.value.child(1) := 0.U stages(0).io.input_prev.key := io.enq.bits.key stages(0).io.input_prev.value <> io.enq.bits.value for (i <- 0 until queSize) { stages(i).io.cmd.valid := cmd_valid stages(i).io.cmd.bits := cmd 
stages(i).io.cur_input_keyval <> io.enq.bits } val is_large_or_equal = WireInit(VecInit(Seq.fill(queSize)(false.B))) for (i <- 0 until queSize) { is_large_or_equal(i) := (stages(i).io.cur_output_keyval.key >= io.enq.bits.key) } val is_large_or_equal_cat = Wire(UInt(queSize.W)) is_large_or_equal_cat := Cat(is_large_or_equal.reverse) val insert_here_idx = PriorityEncoder(is_large_or_equal_cat) for (i <- 0 until queSize) { when (i.U === insert_here_idx) { stages(i).io.insert_here := true.B } .otherwise { stages(i).io.insert_here := false.B } } io.deq.bits <> stages(0).io.output_prev }
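A minimal instantiation sketch for the queue above; ValueInfo and KeyValue are defined elsewhere in this package (the emitted Verilog below suggests ValueInfo carries a 10-bit 'symbol' field and has a no-argument constructor), and doEnq, enqKey, enqSym, and doDeq are illustrative signals:

val pq = Module(new PriorityQueue(queSize = 16, keyWidth = 30, value = new ValueInfo)) // keyWidthInternal becomes 31
pq.io.enq.valid             := doEnq
pq.io.enq.bits.key          := enqKey
pq.io.enq.bits.value.symbol := enqSym
pq.io.deq.ready             := doDeq && !doEnq // enqueue and dequeue must not be requested in the same cycle (see the assert)
val dequeuedKey = pq.io.deq.bits.key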
module PriorityQueueStage_48( // @[ShiftRegisterPriorityQueue.scala:21:7] input clock, // @[ShiftRegisterPriorityQueue.scala:21:7] input reset, // @[ShiftRegisterPriorityQueue.scala:21:7] output [30:0] io_output_prev_key, // @[ShiftRegisterPriorityQueue.scala:22:14] output [9:0] io_output_prev_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14] output [30:0] io_output_nxt_key, // @[ShiftRegisterPriorityQueue.scala:22:14] output [9:0] io_output_nxt_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14] input [30:0] io_input_prev_key, // @[ShiftRegisterPriorityQueue.scala:22:14] input [9:0] io_input_prev_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14] input [30:0] io_input_nxt_key, // @[ShiftRegisterPriorityQueue.scala:22:14] input [9:0] io_input_nxt_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14] input io_cmd_valid, // @[ShiftRegisterPriorityQueue.scala:22:14] input io_cmd_bits, // @[ShiftRegisterPriorityQueue.scala:22:14] input io_insert_here, // @[ShiftRegisterPriorityQueue.scala:22:14] input [30:0] io_cur_input_keyval_key, // @[ShiftRegisterPriorityQueue.scala:22:14] input [9:0] io_cur_input_keyval_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14] output [30:0] io_cur_output_keyval_key, // @[ShiftRegisterPriorityQueue.scala:22:14] output [9:0] io_cur_output_keyval_value_symbol // @[ShiftRegisterPriorityQueue.scala:22:14] ); wire [30:0] io_input_prev_key_0 = io_input_prev_key; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [9:0] io_input_prev_value_symbol_0 = io_input_prev_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [30:0] io_input_nxt_key_0 = io_input_nxt_key; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [9:0] io_input_nxt_value_symbol_0 = io_input_nxt_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7] wire io_cmd_valid_0 = io_cmd_valid; // @[ShiftRegisterPriorityQueue.scala:21:7] wire io_cmd_bits_0 = io_cmd_bits; // @[ShiftRegisterPriorityQueue.scala:21:7] wire io_insert_here_0 = io_insert_here; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [30:0] io_cur_input_keyval_key_0 = io_cur_input_keyval_key; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [9:0] io_cur_input_keyval_value_symbol_0 = io_cur_input_keyval_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [9:0] io_output_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [30:0] io_output_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [9:0] io_output_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [30:0] io_output_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [9:0] io_cur_output_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [30:0] io_cur_output_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7] reg [30:0] key_reg; // @[ShiftRegisterPriorityQueue.scala:30:24] assign io_output_prev_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24] assign io_output_nxt_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24] assign io_cur_output_keyval_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24] reg [9:0] value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:31:22] assign io_output_prev_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22] assign io_output_nxt_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22] assign io_cur_output_keyval_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22] wire _T_2 = key_reg 
>= io_cur_input_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24, :52:30] always @(posedge clock) begin // @[ShiftRegisterPriorityQueue.scala:21:7] if (reset) // @[ShiftRegisterPriorityQueue.scala:21:7] key_reg <= 31'h7FFFFFFF; // @[ShiftRegisterPriorityQueue.scala:30:24] else if (io_cmd_valid_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7] if (io_cmd_bits_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7] if (io_insert_here_0) // @[ShiftRegisterPriorityQueue.scala:21:7] key_reg <= io_cur_input_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24] else if (_T_2) // @[ShiftRegisterPriorityQueue.scala:52:30] key_reg <= io_input_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24] end else // @[ShiftRegisterPriorityQueue.scala:21:7] key_reg <= io_input_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24] end if (io_cmd_valid_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7] if (io_cmd_bits_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7] if (io_insert_here_0) // @[ShiftRegisterPriorityQueue.scala:21:7] value_reg_symbol <= io_cur_input_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22] else if (_T_2) // @[ShiftRegisterPriorityQueue.scala:52:30] value_reg_symbol <= io_input_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22] end else // @[ShiftRegisterPriorityQueue.scala:21:7] value_reg_symbol <= io_input_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22] end
end // always @(posedge)
assign io_output_prev_key = io_output_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7] assign io_output_prev_value_symbol = io_output_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7] assign io_output_nxt_key = io_output_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7] assign io_output_nxt_value_symbol = io_output_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7] assign io_cur_output_keyval_key = io_cur_output_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7] assign io_cur_output_keyval_value_symbol = io_cur_output_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7] endmodule
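Note: the ShiftRegisterPriorityQueue.scala source that produced the module above is not included in its prompt, so the following is only a hypothetical Chisel reconstruction inferred from the emitted ports and register updates (the bundle layout, widths, and every name below are assumptions, not the actual source). The behaviour it captures: an empty stage parks the maximum key so it sorts to the tail, an insert command shifts the held entry toward the tail when its key compares greater-or-equal to the inserted key, and a remove command shifts the entry in from the tail-side neighbour.

import chisel3._
import chisel3.util._

// Hypothetical sketch only -- reverse-engineered from the generated Verilog above.
class KeyValueSketch extends Bundle {
  val key = UInt(31.W)
  val value_symbol = UInt(10.W) // stand-in for the real value bundle
}

class PriorityQueueStageSketch extends Module {
  val io = IO(new Bundle {
    val input_prev = Input(new KeyValueSketch) // neighbour nearer the head
    val input_nxt = Input(new KeyValueSketch) // neighbour nearer the tail
    val output_prev = Output(new KeyValueSketch)
    val output_nxt = Output(new KeyValueSketch)
    val cmd = Flipped(Valid(Bool())) // bits: true = insert, false = remove
    val insert_here = Input(Bool())
    val cur_input_keyval = Input(new KeyValueSketch)
    val cur_output_keyval = Output(new KeyValueSketch)
  })
  // Empty slots hold the largest representable key so they naturally sort to the tail.
  val key_reg = RegInit("h7FFFFFFF".U(31.W))
  val value_reg = Reg(UInt(10.W)) // no reset, matching the emitted value_reg_symbol

  when (io.cmd.valid) {
    when (io.cmd.bits) { // insert: contents shift toward the tail
      when (io.insert_here) {
        key_reg := io.cur_input_keyval.key
        value_reg := io.cur_input_keyval.value_symbol
      } .elsewhen (key_reg >= io.cur_input_keyval.key) {
        key_reg := io.input_prev.key
        value_reg := io.input_prev.value_symbol
      }
    } .otherwise { // remove: contents shift back toward the head
      key_reg := io.input_nxt.key
      value_reg := io.input_nxt.value_symbol
    }
  }
  io.output_prev.key := key_reg
  io.output_prev.value_symbol := value_reg
  io.output_nxt.key := key_reg
  io.output_nxt.value_symbol := value_reg
  io.cur_output_keyval.key := key_reg
  io.cur_output_keyval.value_symbol := value_reg
}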
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
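Note: a short aside on the ClockCrossingReg helper defined at the end of the file above, before the generated module for this prompt. It is the single-stage, enable-gated variant for multi-bit values that cross clock domains under an enable which is typically qualified by a separately synchronized handshake. A minimal usage sketch (the wrapper module and its signal names are invented for illustration):

import chisel3._
import freechips.rocketchip.util.ClockCrossingReg

class CdcCaptureSketch extends Module {
  val io = IO(new Bundle {
    val data_in = Input(UInt(8.W)) // produced in another clock domain
    val capture = Input(Bool()) // asserted only while data_in is known stable
    val data_out = Output(UInt(8.W))
  })
  // doInit = false elaborates a ClockCrossingReg_w8 with no reset value.
  io.data_out := ClockCrossingReg(io.data_in, en = io.capture, doInit = false, name = Some("cdc_capture"))
}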
module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_306( // @[SynchronizerReg.scala:68:19] input clock, // @[SynchronizerReg.scala:68:19] input reset, // @[SynchronizerReg.scala:68:19] input io_d, // @[ShiftReg.scala:36:14] output io_q // @[ShiftReg.scala:36:14] ); wire io_d_0 = io_d; // @[SynchronizerReg.scala:68:19] wire _sync_2_T = io_d_0; // @[SynchronizerReg.scala:54:22, :68:19] wire io_q_0; // @[SynchronizerReg.scala:68:19] reg sync_0; // @[SynchronizerReg.scala:51:87] assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19] reg sync_1; // @[SynchronizerReg.scala:51:87] reg sync_2; // @[SynchronizerReg.scala:51:87] always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19] if (reset) begin // @[SynchronizerReg.scala:68:19] sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87] sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87] sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87] end else begin // @[SynchronizerReg.scala:68:19] sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87] sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87] sync_2 <= _sync_2_T; // @[SynchronizerReg.scala:51:87, :54:22] end
end // always @(posedge, posedge)
assign io_q = io_q_0; // @[SynchronizerReg.scala:68:19]
endmodule
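Note: the primitive above is what the AsyncResetSynchronizerShiftReg wrapper from this prompt stamps out once per input bit; the _d3_i0 suffix reflects sync = 3 stages and an init value of 0. A minimal sketch of reaching it through the public companion object (the wrapper module and signal names are invented for illustration):

import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

class LevelSyncSketch extends Module {
  val io = IO(new Bundle {
    val async_level = Input(Bool()) // level signal arriving from another clock domain
    val sync_level = Output(Bool())
  })
  // Each bit of the input elaborates an AsyncResetSynchronizerPrimitiveShiftReg_d3_i0
  // like the module above, with its reset cast to AsyncReset.
  io.sync_level := AsyncResetSynchronizerShiftReg(io.async_level, sync = 3, init = 0, name = Some("level_sync"))
}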
Generate the Verilog code corresponding to the following Chisel files. File InputUnit.scala: package constellation.router import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.{Field, Parameters} import freechips.rocketchip.util._ import constellation.channel._ import constellation.routing.{FlowRoutingBundle} import constellation.noc.{HasNoCParams} class AbstractInputUnitIO( val cParam: BaseChannelParams, val outParams: Seq[ChannelParams], val egressParams: Seq[EgressChannelParams], )(implicit val p: Parameters) extends Bundle with HasRouterOutputParams { val nodeId = cParam.destId val router_req = Decoupled(new RouteComputerReq) val router_resp = Input(new RouteComputerResp(outParams, egressParams)) val vcalloc_req = Decoupled(new VCAllocReq(cParam, outParams, egressParams)) val vcalloc_resp = Input(new VCAllocResp(outParams, egressParams)) val out_credit_available = Input(MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })) val salloc_req = Vec(cParam.destSpeedup, Decoupled(new SwitchAllocReq(outParams, egressParams))) val out = Vec(cParam.destSpeedup, Valid(new SwitchBundle(outParams, egressParams))) val debug = Output(new Bundle { val va_stall = UInt(log2Ceil(cParam.nVirtualChannels).W) val sa_stall = UInt(log2Ceil(cParam.nVirtualChannels).W) }) val block = Input(Bool()) } abstract class AbstractInputUnit( val cParam: BaseChannelParams, val outParams: Seq[ChannelParams], val egressParams: Seq[EgressChannelParams] )(implicit val p: Parameters) extends Module with HasRouterOutputParams with HasNoCParams { val nodeId = cParam.destId def io: AbstractInputUnitIO } class InputBuffer(cParam: ChannelParams)(implicit p: Parameters) extends Module { val nVirtualChannels = cParam.nVirtualChannels val io = IO(new Bundle { val enq = Flipped(Vec(cParam.srcSpeedup, Valid(new Flit(cParam.payloadBits)))) val deq = Vec(cParam.nVirtualChannels, Decoupled(new BaseFlit(cParam.payloadBits))) }) val useOutputQueues = cParam.useOutputQueues val delims = if (useOutputQueues) { cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize else 0).scanLeft(0)(_+_) } else { // If no queuing, have to add an additional slot since head == tail implies empty // TODO this should be fixed, should use all slots available cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize + 1 else 0).scanLeft(0)(_+_) } val starts = delims.dropRight(1).zipWithIndex.map { case (s,i) => if (cParam.virtualChannelParams(i).traversable) s else 0 } val ends = delims.tail.zipWithIndex.map { case (s,i) => if (cParam.virtualChannelParams(i).traversable) s else 0 } val fullSize = delims.last // Ugly case. 
Use multiple queues if ((cParam.srcSpeedup > 1 || cParam.destSpeedup > 1 || fullSize <= 1) || !cParam.unifiedBuffer) { require(useOutputQueues) val qs = cParam.virtualChannelParams.map(v => Module(new Queue(new BaseFlit(cParam.payloadBits), v.bufferSize))) qs.zipWithIndex.foreach { case (q,i) => val sel = io.enq.map(f => f.valid && f.bits.virt_channel_id === i.U) q.io.enq.valid := sel.orR q.io.enq.bits.head := Mux1H(sel, io.enq.map(_.bits.head)) q.io.enq.bits.tail := Mux1H(sel, io.enq.map(_.bits.tail)) q.io.enq.bits.payload := Mux1H(sel, io.enq.map(_.bits.payload)) io.deq(i) <> q.io.deq } } else { val mem = Mem(fullSize, new BaseFlit(cParam.payloadBits)) val heads = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W)))) val tails = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W)))) val empty = (heads zip tails).map(t => t._1 === t._2) val qs = Seq.fill(nVirtualChannels) { Module(new Queue(new BaseFlit(cParam.payloadBits), 1, pipe=true)) } qs.foreach(_.io.enq.valid := false.B) qs.foreach(_.io.enq.bits := DontCare) val vc_sel = UIntToOH(io.enq(0).bits.virt_channel_id) val flit = Wire(new BaseFlit(cParam.payloadBits)) val direct_to_q = (Mux1H(vc_sel, qs.map(_.io.enq.ready)) && Mux1H(vc_sel, empty)) && useOutputQueues.B flit.head := io.enq(0).bits.head flit.tail := io.enq(0).bits.tail flit.payload := io.enq(0).bits.payload when (io.enq(0).valid && !direct_to_q) { val tail = tails(io.enq(0).bits.virt_channel_id) mem.write(tail, flit) tails(io.enq(0).bits.virt_channel_id) := Mux( tail === Mux1H(vc_sel, ends.map(_ - 1).map(_ max 0).map(_.U)), Mux1H(vc_sel, starts.map(_.U)), tail + 1.U) } .elsewhen (io.enq(0).valid && direct_to_q) { for (i <- 0 until nVirtualChannels) { when (io.enq(0).bits.virt_channel_id === i.U) { qs(i).io.enq.valid := true.B qs(i).io.enq.bits := flit } } } if (useOutputQueues) { val can_to_q = (0 until nVirtualChannels).map { i => !empty(i) && qs(i).io.enq.ready } val to_q_oh = PriorityEncoderOH(can_to_q) val to_q = OHToUInt(to_q_oh) when (can_to_q.orR) { val head = Mux1H(to_q_oh, heads) heads(to_q) := Mux( head === Mux1H(to_q_oh, ends.map(_ - 1).map(_ max 0).map(_.U)), Mux1H(to_q_oh, starts.map(_.U)), head + 1.U) for (i <- 0 until nVirtualChannels) { when (to_q_oh(i)) { qs(i).io.enq.valid := true.B qs(i).io.enq.bits := mem.read(head) } } } for (i <- 0 until nVirtualChannels) { io.deq(i) <> qs(i).io.deq } } else { qs.map(_.io.deq.ready := false.B) val ready_sel = io.deq.map(_.ready) val fire = io.deq.map(_.fire) assert(PopCount(fire) <= 1.U) val head = Mux1H(fire, heads) when (fire.orR) { val fire_idx = OHToUInt(fire) heads(fire_idx) := Mux( head === Mux1H(fire, ends.map(_ - 1).map(_ max 0).map(_.U)), Mux1H(fire, starts.map(_.U)), head + 1.U) } val read_flit = mem.read(head) for (i <- 0 until nVirtualChannels) { io.deq(i).valid := !empty(i) io.deq(i).bits := read_flit } } } } class InputUnit(cParam: ChannelParams, outParams: Seq[ChannelParams], egressParams: Seq[EgressChannelParams], combineRCVA: Boolean, combineSAST: Boolean ) (implicit p: Parameters) extends AbstractInputUnit(cParam, outParams, egressParams)(p) { val nVirtualChannels = cParam.nVirtualChannels val virtualChannelParams = cParam.virtualChannelParams class InputUnitIO extends AbstractInputUnitIO(cParam, outParams, egressParams) { val in = Flipped(new Channel(cParam.asInstanceOf[ChannelParams])) } val io = IO(new InputUnitIO) val g_i :: g_r :: g_v :: g_a :: g_c :: Nil = Enum(5) class InputState extends Bundle { val g = UInt(3.W) val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) 
}) val flow = new FlowRoutingBundle val fifo_deps = UInt(nVirtualChannels.W) } val input_buffer = Module(new InputBuffer(cParam)) for (i <- 0 until cParam.srcSpeedup) { input_buffer.io.enq(i) := io.in.flit(i) } input_buffer.io.deq.foreach(_.ready := false.B) val route_arbiter = Module(new Arbiter( new RouteComputerReq, nVirtualChannels )) io.router_req <> route_arbiter.io.out val states = Reg(Vec(nVirtualChannels, new InputState)) val anyFifo = cParam.possibleFlows.map(_.fifo).reduce(_||_) val allFifo = cParam.possibleFlows.map(_.fifo).reduce(_&&_) if (anyFifo) { val idle_mask = VecInit(states.map(_.g === g_i)).asUInt for (s <- states) for (i <- 0 until nVirtualChannels) s.fifo_deps := s.fifo_deps & ~idle_mask } for (i <- 0 until cParam.srcSpeedup) { when (io.in.flit(i).fire && io.in.flit(i).bits.head) { val id = io.in.flit(i).bits.virt_channel_id assert(id < nVirtualChannels.U) assert(states(id).g === g_i) val at_dest = io.in.flit(i).bits.flow.egress_node === nodeId.U states(id).g := Mux(at_dest, g_v, g_r) states(id).vc_sel.foreach(_.foreach(_ := false.B)) for (o <- 0 until nEgress) { when (o.U === io.in.flit(i).bits.flow.egress_node_id) { states(id).vc_sel(o+nOutputs)(0) := true.B } } states(id).flow := io.in.flit(i).bits.flow if (anyFifo) { val fifo = cParam.possibleFlows.filter(_.fifo).map(_.isFlow(io.in.flit(i).bits.flow)).toSeq.orR states(id).fifo_deps := VecInit(states.zipWithIndex.map { case (s, j) => s.g =/= g_i && s.flow.asUInt === io.in.flit(i).bits.flow.asUInt && j.U =/= id }).asUInt } } } (route_arbiter.io.in zip states).zipWithIndex.map { case ((i,s),idx) => if (virtualChannelParams(idx).traversable) { i.valid := s.g === g_r i.bits.flow := s.flow i.bits.src_virt_id := idx.U when (i.fire) { s.g := g_v } } else { i.valid := false.B i.bits := DontCare } } when (io.router_req.fire) { val id = io.router_req.bits.src_virt_id assert(states(id).g === g_r) states(id).g := g_v for (i <- 0 until nVirtualChannels) { when (i.U === id) { states(i).vc_sel := io.router_resp.vc_sel } } } val mask = RegInit(0.U(nVirtualChannels.W)) val vcalloc_reqs = Wire(Vec(nVirtualChannels, new VCAllocReq(cParam, outParams, egressParams))) val vcalloc_vals = Wire(Vec(nVirtualChannels, Bool())) val vcalloc_filter = PriorityEncoderOH(Cat(vcalloc_vals.asUInt, vcalloc_vals.asUInt & ~mask)) val vcalloc_sel = vcalloc_filter(nVirtualChannels-1,0) | (vcalloc_filter >> nVirtualChannels) // Prioritize incoming packetes when (io.router_req.fire) { mask := (1.U << io.router_req.bits.src_virt_id) - 1.U } .elsewhen (vcalloc_vals.orR) { mask := Mux1H(vcalloc_sel, (0 until nVirtualChannels).map { w => ~(0.U((w+1).W)) }) } io.vcalloc_req.valid := vcalloc_vals.orR io.vcalloc_req.bits := Mux1H(vcalloc_sel, vcalloc_reqs) states.zipWithIndex.map { case (s,idx) => if (virtualChannelParams(idx).traversable) { vcalloc_vals(idx) := s.g === g_v && s.fifo_deps === 0.U vcalloc_reqs(idx).in_vc := idx.U vcalloc_reqs(idx).vc_sel := s.vc_sel vcalloc_reqs(idx).flow := s.flow when (vcalloc_vals(idx) && vcalloc_sel(idx) && io.vcalloc_req.ready) { s.g := g_a } if (combineRCVA) { when (route_arbiter.io.in(idx).fire) { vcalloc_vals(idx) := true.B vcalloc_reqs(idx).vc_sel := io.router_resp.vc_sel } } } else { vcalloc_vals(idx) := false.B vcalloc_reqs(idx) := DontCare } } io.debug.va_stall := PopCount(vcalloc_vals) - io.vcalloc_req.ready when (io.vcalloc_req.fire) { for (i <- 0 until nVirtualChannels) { when (vcalloc_sel(i)) { states(i).vc_sel := io.vcalloc_resp.vc_sel states(i).g := g_a if (!combineRCVA) { assert(states(i).g === g_v) } } } } val 
salloc_arb = Module(new SwitchArbiter( nVirtualChannels, cParam.destSpeedup, outParams, egressParams )) (states zip salloc_arb.io.in).zipWithIndex.map { case ((s,r),i) => if (virtualChannelParams(i).traversable) { val credit_available = (s.vc_sel.asUInt & io.out_credit_available.asUInt) =/= 0.U r.valid := s.g === g_a && credit_available && input_buffer.io.deq(i).valid r.bits.vc_sel := s.vc_sel val deq_tail = input_buffer.io.deq(i).bits.tail r.bits.tail := deq_tail when (r.fire && deq_tail) { s.g := g_i } input_buffer.io.deq(i).ready := r.ready } else { r.valid := false.B r.bits := DontCare } } io.debug.sa_stall := PopCount(salloc_arb.io.in.map(r => r.valid && !r.ready)) io.salloc_req <> salloc_arb.io.out when (io.block) { salloc_arb.io.out.foreach(_.ready := false.B) io.salloc_req.foreach(_.valid := false.B) } class OutBundle extends Bundle { val valid = Bool() val vid = UInt(virtualChannelBits.W) val out_vid = UInt(log2Up(allOutParams.map(_.nVirtualChannels).max).W) val flit = new Flit(cParam.payloadBits) } val salloc_outs = if (combineSAST) { Wire(Vec(cParam.destSpeedup, new OutBundle)) } else { Reg(Vec(cParam.destSpeedup, new OutBundle)) } io.in.credit_return := salloc_arb.io.out.zipWithIndex.map { case (o, i) => Mux(o.fire, salloc_arb.io.chosen_oh(i), 0.U) }.reduce(_|_) io.in.vc_free := salloc_arb.io.out.zipWithIndex.map { case (o, i) => Mux(o.fire && Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail)), salloc_arb.io.chosen_oh(i), 0.U) }.reduce(_|_) for (i <- 0 until cParam.destSpeedup) { val salloc_out = salloc_outs(i) salloc_out.valid := salloc_arb.io.out(i).fire salloc_out.vid := OHToUInt(salloc_arb.io.chosen_oh(i)) val vc_sel = Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.vc_sel)) val channel_oh = vc_sel.map(_.reduce(_||_)).toSeq val virt_channel = Mux1H(channel_oh, vc_sel.map(v => OHToUInt(v)).toSeq) when (salloc_arb.io.out(i).fire) { salloc_out.out_vid := virt_channel salloc_out.flit.payload := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.payload)) salloc_out.flit.head := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.head)) salloc_out.flit.tail := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail)) salloc_out.flit.flow := Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.flow)) } .otherwise { salloc_out.out_vid := DontCare salloc_out.flit := DontCare } salloc_out.flit.virt_channel_id := DontCare // this gets set in the switch io.out(i).valid := salloc_out.valid io.out(i).bits.flit := salloc_out.flit io.out(i).bits.out_virt_channel := salloc_out.out_vid } def filterVCSel(sel: MixedVec[Vec[Bool]], srcV: Int) = { if (virtualChannelParams(srcV).traversable) { outParams.zipWithIndex.map { case (oP, oI) => (0 until oP.nVirtualChannels).map { oV => var allow = false virtualChannelParams(srcV).possibleFlows.foreach { pI => allow = allow || routingRelation( cParam.channelRoutingInfos(srcV), oP.channelRoutingInfos(oV), pI ) } if (!allow) sel(oI)(oV) := false.B } } } } (0 until nVirtualChannels).map { i => if (!virtualChannelParams(i).traversable) states(i) := DontCare filterVCSel(states(i).vc_sel, i) } when (reset.asBool) { states.foreach(_.g := g_i) } }
module InputUnit_15( // @[InputUnit.scala:158:7] input clock, // @[InputUnit.scala:158:7] input reset, // @[InputUnit.scala:158:7] output [4:0] io_router_req_bits_src_virt_id, // @[InputUnit.scala:170:14] output [3:0] io_router_req_bits_flow_vnet_id, // @[InputUnit.scala:170:14] output [5:0] io_router_req_bits_flow_ingress_node, // @[InputUnit.scala:170:14] output [2:0] io_router_req_bits_flow_ingress_node_id, // @[InputUnit.scala:170:14] output [5:0] io_router_req_bits_flow_egress_node, // @[InputUnit.scala:170:14] output [2:0] io_router_req_bits_flow_egress_node_id, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_1_9, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_1_13, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_1_17, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_1_20, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_1_21, // @[InputUnit.scala:170:14] input io_vcalloc_req_ready, // @[InputUnit.scala:170:14] output io_vcalloc_req_valid, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_1_9, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_1_13, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_1_17, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_1_20, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_1_21, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_1_9, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_1_13, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_1_17, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_1_20, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_1_21, // @[InputUnit.scala:170:14] input io_out_credit_available_1_2, // @[InputUnit.scala:170:14] input io_out_credit_available_1_3, // @[InputUnit.scala:170:14] input io_out_credit_available_1_8, // @[InputUnit.scala:170:14] input io_out_credit_available_1_9, // @[InputUnit.scala:170:14] input io_out_credit_available_1_10, // @[InputUnit.scala:170:14] input io_out_credit_available_1_11, // @[InputUnit.scala:170:14] input io_out_credit_available_1_12, // @[InputUnit.scala:170:14] input io_out_credit_available_1_13, // @[InputUnit.scala:170:14] input io_out_credit_available_1_14, // @[InputUnit.scala:170:14] input io_out_credit_available_1_15, // @[InputUnit.scala:170:14] input io_out_credit_available_1_16, // @[InputUnit.scala:170:14] input io_out_credit_available_1_17, // @[InputUnit.scala:170:14] input io_out_credit_available_1_18, // @[InputUnit.scala:170:14] input io_out_credit_available_1_19, // @[InputUnit.scala:170:14] input io_out_credit_available_1_20, // @[InputUnit.scala:170:14] input io_out_credit_available_1_21, // @[InputUnit.scala:170:14] input io_out_credit_available_0_2, // @[InputUnit.scala:170:14] input io_out_credit_available_0_3, // @[InputUnit.scala:170:14] input io_out_credit_available_0_10, // @[InputUnit.scala:170:14] input io_out_credit_available_0_11, // @[InputUnit.scala:170:14] input io_out_credit_available_0_14, // @[InputUnit.scala:170:14] input io_out_credit_available_0_15, // @[InputUnit.scala:170:14] input io_out_credit_available_0_18, // @[InputUnit.scala:170:14] input io_out_credit_available_0_19, // @[InputUnit.scala:170:14] input io_out_credit_available_0_20, // @[InputUnit.scala:170:14] input io_out_credit_available_0_21, // @[InputUnit.scala:170:14] input io_salloc_req_0_ready, // @[InputUnit.scala:170:14] output io_salloc_req_0_valid, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_2, // 
@[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_3, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_8, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_9, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_10, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_11, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_12, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_13, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_14, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_15, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_16, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_17, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_18, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_19, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_20, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_21, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_2, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_3, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_8, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_9, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_10, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_11, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_12, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_13, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_14, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_15, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_16, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_17, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_18, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_19, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_20, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_21, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_tail, // @[InputUnit.scala:170:14] output io_out_0_valid, // @[InputUnit.scala:170:14] output io_out_0_bits_flit_head, // @[InputUnit.scala:170:14] output io_out_0_bits_flit_tail, // @[InputUnit.scala:170:14] output [72:0] io_out_0_bits_flit_payload, // @[InputUnit.scala:170:14] output [3:0] io_out_0_bits_flit_flow_vnet_id, // @[InputUnit.scala:170:14] output [5:0] io_out_0_bits_flit_flow_ingress_node, // @[InputUnit.scala:170:14] output [2:0] io_out_0_bits_flit_flow_ingress_node_id, // @[InputUnit.scala:170:14] output [5:0] io_out_0_bits_flit_flow_egress_node, // @[InputUnit.scala:170:14] output [2:0] io_out_0_bits_flit_flow_egress_node_id, // @[InputUnit.scala:170:14] output [4:0] io_out_0_bits_out_virt_channel, // @[InputUnit.scala:170:14] output [4:0] io_debug_va_stall, // @[InputUnit.scala:170:14] output [4:0] io_debug_sa_stall, // @[InputUnit.scala:170:14] input io_in_flit_0_valid, // @[InputUnit.scala:170:14] input io_in_flit_0_bits_head, // @[InputUnit.scala:170:14] input io_in_flit_0_bits_tail, // @[InputUnit.scala:170:14] input [72:0] io_in_flit_0_bits_payload, // @[InputUnit.scala:170:14] input [3:0] io_in_flit_0_bits_flow_vnet_id, // @[InputUnit.scala:170:14] input [5:0] io_in_flit_0_bits_flow_ingress_node, // @[InputUnit.scala:170:14] input [2:0] io_in_flit_0_bits_flow_ingress_node_id, // 
@[InputUnit.scala:170:14] input [5:0] io_in_flit_0_bits_flow_egress_node, // @[InputUnit.scala:170:14] input [2:0] io_in_flit_0_bits_flow_egress_node_id, // @[InputUnit.scala:170:14] input [4:0] io_in_flit_0_bits_virt_channel_id, // @[InputUnit.scala:170:14] output [21:0] io_in_credit_return, // @[InputUnit.scala:170:14] output [21:0] io_in_vc_free // @[InputUnit.scala:170:14] ); wire vcalloc_vals_21; // @[InputUnit.scala:266:32] wire vcalloc_vals_20; // @[InputUnit.scala:266:32] wire vcalloc_vals_17; // @[InputUnit.scala:266:32] wire vcalloc_vals_16; // @[InputUnit.scala:266:32] wire vcalloc_vals_13; // @[InputUnit.scala:266:32] wire vcalloc_vals_12; // @[InputUnit.scala:266:32] wire vcalloc_vals_9; // @[InputUnit.scala:266:32] wire vcalloc_vals_8; // @[InputUnit.scala:266:32] wire _salloc_arb_io_in_8_ready; // @[InputUnit.scala:296:26] wire _salloc_arb_io_in_9_ready; // @[InputUnit.scala:296:26] wire _salloc_arb_io_in_12_ready; // @[InputUnit.scala:296:26] wire _salloc_arb_io_in_13_ready; // @[InputUnit.scala:296:26] wire _salloc_arb_io_in_16_ready; // @[InputUnit.scala:296:26] wire _salloc_arb_io_in_17_ready; // @[InputUnit.scala:296:26] wire _salloc_arb_io_in_20_ready; // @[InputUnit.scala:296:26] wire _salloc_arb_io_in_21_ready; // @[InputUnit.scala:296:26] wire _salloc_arb_io_out_0_valid; // @[InputUnit.scala:296:26] wire [21:0] _salloc_arb_io_chosen_oh_0; // @[InputUnit.scala:296:26] wire _route_arbiter_io_in_8_ready; // @[InputUnit.scala:187:29] wire _route_arbiter_io_in_9_ready; // @[InputUnit.scala:187:29] wire _route_arbiter_io_in_12_ready; // @[InputUnit.scala:187:29] wire _route_arbiter_io_in_13_ready; // @[InputUnit.scala:187:29] wire _route_arbiter_io_in_16_ready; // @[InputUnit.scala:187:29] wire _route_arbiter_io_in_17_ready; // @[InputUnit.scala:187:29] wire _route_arbiter_io_in_20_ready; // @[InputUnit.scala:187:29] wire _route_arbiter_io_in_21_ready; // @[InputUnit.scala:187:29] wire _route_arbiter_io_out_valid; // @[InputUnit.scala:187:29] wire [4:0] _route_arbiter_io_out_bits_src_virt_id; // @[InputUnit.scala:187:29] wire _input_buffer_io_deq_0_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_0_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_0_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_1_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_1_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_1_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_2_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_2_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_2_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_3_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_3_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_3_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_4_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_4_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_4_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_5_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_5_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_5_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_6_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_6_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_6_bits_payload; // 
@[InputUnit.scala:181:28] wire _input_buffer_io_deq_7_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_7_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_7_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_8_valid; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_8_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_8_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_8_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_9_valid; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_9_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_9_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_9_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_10_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_10_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_10_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_11_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_11_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_11_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_12_valid; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_12_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_12_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_12_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_13_valid; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_13_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_13_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_13_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_14_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_14_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_14_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_15_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_15_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_15_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_16_valid; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_16_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_16_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_16_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_17_valid; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_17_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_17_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_17_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_18_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_18_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_18_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_19_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_19_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_19_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_20_valid; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_20_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_20_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_20_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_21_valid; // 
@[InputUnit.scala:181:28] wire _input_buffer_io_deq_21_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_21_bits_tail; // @[InputUnit.scala:181:28] wire [72:0] _input_buffer_io_deq_21_bits_payload; // @[InputUnit.scala:181:28] reg [2:0] states_8_g; // @[InputUnit.scala:192:19] reg states_8_vc_sel_1_9; // @[InputUnit.scala:192:19] reg states_8_vc_sel_1_20; // @[InputUnit.scala:192:19] reg states_8_vc_sel_1_21; // @[InputUnit.scala:192:19] reg [3:0] states_8_flow_vnet_id; // @[InputUnit.scala:192:19] reg [5:0] states_8_flow_ingress_node; // @[InputUnit.scala:192:19] reg [2:0] states_8_flow_ingress_node_id; // @[InputUnit.scala:192:19] reg [5:0] states_8_flow_egress_node; // @[InputUnit.scala:192:19] reg [2:0] states_8_flow_egress_node_id; // @[InputUnit.scala:192:19] reg [2:0] states_9_g; // @[InputUnit.scala:192:19] reg states_9_vc_sel_1_9; // @[InputUnit.scala:192:19] reg states_9_vc_sel_1_20; // @[InputUnit.scala:192:19] reg states_9_vc_sel_1_21; // @[InputUnit.scala:192:19] reg [3:0] states_9_flow_vnet_id; // @[InputUnit.scala:192:19] reg [5:0] states_9_flow_ingress_node; // @[InputUnit.scala:192:19] reg [2:0] states_9_flow_ingress_node_id; // @[InputUnit.scala:192:19] reg [5:0] states_9_flow_egress_node; // @[InputUnit.scala:192:19] reg [2:0] states_9_flow_egress_node_id; // @[InputUnit.scala:192:19] reg [2:0] states_12_g; // @[InputUnit.scala:192:19] reg states_12_vc_sel_1_13; // @[InputUnit.scala:192:19] reg states_12_vc_sel_1_20; // @[InputUnit.scala:192:19] reg states_12_vc_sel_1_21; // @[InputUnit.scala:192:19] reg [3:0] states_12_flow_vnet_id; // @[InputUnit.scala:192:19] reg [5:0] states_12_flow_ingress_node; // @[InputUnit.scala:192:19] reg [2:0] states_12_flow_ingress_node_id; // @[InputUnit.scala:192:19] reg [5:0] states_12_flow_egress_node; // @[InputUnit.scala:192:19] reg [2:0] states_12_flow_egress_node_id; // @[InputUnit.scala:192:19] reg [2:0] states_13_g; // @[InputUnit.scala:192:19] reg states_13_vc_sel_1_13; // @[InputUnit.scala:192:19] reg states_13_vc_sel_1_20; // @[InputUnit.scala:192:19] reg states_13_vc_sel_1_21; // @[InputUnit.scala:192:19] reg [3:0] states_13_flow_vnet_id; // @[InputUnit.scala:192:19] reg [5:0] states_13_flow_ingress_node; // @[InputUnit.scala:192:19] reg [2:0] states_13_flow_ingress_node_id; // @[InputUnit.scala:192:19] reg [5:0] states_13_flow_egress_node; // @[InputUnit.scala:192:19] reg [2:0] states_13_flow_egress_node_id; // @[InputUnit.scala:192:19] reg [2:0] states_16_g; // @[InputUnit.scala:192:19] reg states_16_vc_sel_1_17; // @[InputUnit.scala:192:19] reg states_16_vc_sel_1_20; // @[InputUnit.scala:192:19] reg states_16_vc_sel_1_21; // @[InputUnit.scala:192:19] reg [3:0] states_16_flow_vnet_id; // @[InputUnit.scala:192:19] reg [5:0] states_16_flow_ingress_node; // @[InputUnit.scala:192:19] reg [2:0] states_16_flow_ingress_node_id; // @[InputUnit.scala:192:19] reg [5:0] states_16_flow_egress_node; // @[InputUnit.scala:192:19] reg [2:0] states_16_flow_egress_node_id; // @[InputUnit.scala:192:19] reg [2:0] states_17_g; // @[InputUnit.scala:192:19] reg states_17_vc_sel_1_17; // @[InputUnit.scala:192:19] reg states_17_vc_sel_1_20; // @[InputUnit.scala:192:19] reg states_17_vc_sel_1_21; // @[InputUnit.scala:192:19] reg [3:0] states_17_flow_vnet_id; // @[InputUnit.scala:192:19] reg [5:0] states_17_flow_ingress_node; // @[InputUnit.scala:192:19] reg [2:0] states_17_flow_ingress_node_id; // @[InputUnit.scala:192:19] reg [5:0] states_17_flow_egress_node; // @[InputUnit.scala:192:19] reg [2:0] states_17_flow_egress_node_id; // 
@[InputUnit.scala:192:19] reg [2:0] states_20_g; // @[InputUnit.scala:192:19] reg states_20_vc_sel_1_9; // @[InputUnit.scala:192:19] reg states_20_vc_sel_1_13; // @[InputUnit.scala:192:19] reg states_20_vc_sel_1_17; // @[InputUnit.scala:192:19] reg states_20_vc_sel_1_20; // @[InputUnit.scala:192:19] reg states_20_vc_sel_1_21; // @[InputUnit.scala:192:19] reg [3:0] states_20_flow_vnet_id; // @[InputUnit.scala:192:19] reg [5:0] states_20_flow_ingress_node; // @[InputUnit.scala:192:19] reg [2:0] states_20_flow_ingress_node_id; // @[InputUnit.scala:192:19] reg [5:0] states_20_flow_egress_node; // @[InputUnit.scala:192:19] reg [2:0] states_20_flow_egress_node_id; // @[InputUnit.scala:192:19] reg [2:0] states_21_g; // @[InputUnit.scala:192:19] reg states_21_vc_sel_1_9; // @[InputUnit.scala:192:19] reg states_21_vc_sel_1_13; // @[InputUnit.scala:192:19] reg states_21_vc_sel_1_17; // @[InputUnit.scala:192:19] reg states_21_vc_sel_1_20; // @[InputUnit.scala:192:19] reg states_21_vc_sel_1_21; // @[InputUnit.scala:192:19] reg [3:0] states_21_flow_vnet_id; // @[InputUnit.scala:192:19] reg [5:0] states_21_flow_ingress_node; // @[InputUnit.scala:192:19] reg [2:0] states_21_flow_ingress_node_id; // @[InputUnit.scala:192:19] reg [5:0] states_21_flow_egress_node; // @[InputUnit.scala:192:19] reg [2:0] states_21_flow_egress_node_id; // @[InputUnit.scala:192:19] wire _GEN = io_in_flit_0_valid & io_in_flit_0_bits_head; // @[InputUnit.scala:205:30] wire route_arbiter_io_in_8_valid = states_8_g == 3'h1; // @[InputUnit.scala:192:19, :229:22] wire route_arbiter_io_in_9_valid = states_9_g == 3'h1; // @[InputUnit.scala:192:19, :229:22] wire route_arbiter_io_in_12_valid = states_12_g == 3'h1; // @[InputUnit.scala:192:19, :229:22] wire route_arbiter_io_in_13_valid = states_13_g == 3'h1; // @[InputUnit.scala:192:19, :229:22] wire route_arbiter_io_in_16_valid = states_16_g == 3'h1; // @[InputUnit.scala:192:19, :229:22] wire route_arbiter_io_in_17_valid = states_17_g == 3'h1; // @[InputUnit.scala:192:19, :229:22] wire route_arbiter_io_in_20_valid = states_20_g == 3'h1; // @[InputUnit.scala:192:19, :229:22] wire route_arbiter_io_in_21_valid = states_21_g == 3'h1; // @[InputUnit.scala:192:19, :229:22] reg [21:0] mask; // @[InputUnit.scala:250:21] wire [21:0] _vcalloc_filter_T_3 = {vcalloc_vals_21, vcalloc_vals_20, 2'h0, vcalloc_vals_17, vcalloc_vals_16, 2'h0, vcalloc_vals_13, vcalloc_vals_12, 2'h0, vcalloc_vals_9, vcalloc_vals_8, 8'h0} & ~mask; // @[InputUnit.scala:250:21, :253:{80,87,89}, :266:32] wire [43:0] vcalloc_filter = _vcalloc_filter_T_3[0] ? 44'h1 : _vcalloc_filter_T_3[1] ? 44'h2 : _vcalloc_filter_T_3[2] ? 44'h4 : _vcalloc_filter_T_3[3] ? 44'h8 : _vcalloc_filter_T_3[4] ? 44'h10 : _vcalloc_filter_T_3[5] ? 44'h20 : _vcalloc_filter_T_3[6] ? 44'h40 : _vcalloc_filter_T_3[7] ? 44'h80 : _vcalloc_filter_T_3[8] ? 44'h100 : _vcalloc_filter_T_3[9] ? 44'h200 : _vcalloc_filter_T_3[10] ? 44'h400 : _vcalloc_filter_T_3[11] ? 44'h800 : _vcalloc_filter_T_3[12] ? 44'h1000 : _vcalloc_filter_T_3[13] ? 44'h2000 : _vcalloc_filter_T_3[14] ? 44'h4000 : _vcalloc_filter_T_3[15] ? 44'h8000 : _vcalloc_filter_T_3[16] ? 44'h10000 : _vcalloc_filter_T_3[17] ? 44'h20000 : _vcalloc_filter_T_3[18] ? 44'h40000 : _vcalloc_filter_T_3[19] ? 44'h80000 : _vcalloc_filter_T_3[20] ? 44'h100000 : _vcalloc_filter_T_3[21] ? 44'h200000 : vcalloc_vals_8 ? 44'h40000000 : vcalloc_vals_9 ? 44'h80000000 : vcalloc_vals_12 ? 44'h400000000 : vcalloc_vals_13 ? 44'h800000000 : vcalloc_vals_16 ? 44'h4000000000 : vcalloc_vals_17 ? 44'h8000000000 : vcalloc_vals_20 ? 
44'h40000000000 : {vcalloc_vals_21, 43'h0}; // @[OneHot.scala:85:71] wire [21:0] vcalloc_sel = vcalloc_filter[21:0] | vcalloc_filter[43:22]; // @[Mux.scala:50:70] wire io_vcalloc_req_valid_0 = vcalloc_vals_8 | vcalloc_vals_9 | vcalloc_vals_12 | vcalloc_vals_13 | vcalloc_vals_16 | vcalloc_vals_17 | vcalloc_vals_20 | vcalloc_vals_21; // @[package.scala:81:59] assign vcalloc_vals_8 = states_8_g == 3'h2; // @[InputUnit.scala:192:19, :266:32] assign vcalloc_vals_9 = states_9_g == 3'h2; // @[InputUnit.scala:192:19, :266:32] assign vcalloc_vals_12 = states_12_g == 3'h2; // @[InputUnit.scala:192:19, :266:32] assign vcalloc_vals_13 = states_13_g == 3'h2; // @[InputUnit.scala:192:19, :266:32] assign vcalloc_vals_16 = states_16_g == 3'h2; // @[InputUnit.scala:192:19, :266:32] assign vcalloc_vals_17 = states_17_g == 3'h2; // @[InputUnit.scala:192:19, :266:32] assign vcalloc_vals_20 = states_20_g == 3'h2; // @[InputUnit.scala:192:19, :266:32] assign vcalloc_vals_21 = states_21_g == 3'h2; // @[InputUnit.scala:192:19, :266:32] wire _GEN_0 = io_vcalloc_req_ready & io_vcalloc_req_valid_0; // @[Decoupled.scala:51:35] wire _GEN_1 = _GEN_0 & vcalloc_sel[8]; // @[Mux.scala:32:36] wire _GEN_2 = _GEN_0 & vcalloc_sel[9]; // @[Mux.scala:32:36] wire _GEN_3 = _GEN_0 & vcalloc_sel[12]; // @[Mux.scala:32:36] wire _GEN_4 = _GEN_0 & vcalloc_sel[13]; // @[Mux.scala:32:36] wire _GEN_5 = _GEN_0 & vcalloc_sel[16]; // @[Mux.scala:32:36] wire _GEN_6 = _GEN_0 & vcalloc_sel[17]; // @[Mux.scala:32:36] wire _GEN_7 = _GEN_0 & vcalloc_sel[20]; // @[Mux.scala:32:36] wire _GEN_8 = _GEN_0 & vcalloc_sel[21]; // @[Mux.scala:32:36]
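Note: the vcalloc_filter / vcalloc_sel terms above are the rotating-priority VC-allocation request from InputUnit.scala: the per-VC request vector is duplicated, the low copy is masked to hide channels at or below the previous grant, PriorityEncoderOH picks a single bit out of the concatenation, and the two halves are folded back together so the unmasked copy provides the wrap-around. The same idiom can be sketched standalone as below (generic width; the module and signal names are invented for illustration, and the real InputUnit additionally reseeds the mask when router_req fires):

import chisel3._
import chisel3.util._

// Rotating-priority one-hot select, as used for the VC allocation request.
class RotatingSelectSketch(n: Int) extends Module {
  val io = IO(new Bundle {
    val req = Input(UInt(n.W)) // request per channel
    val fire = Input(Bool()) // the selected request was accepted this cycle
    val grant = Output(UInt(n.W)) // one-hot; only meaningful when req is non-zero
  })
  val mask = RegInit(0.U(n.W))
  // Prefer requests strictly above the last grant; fall back to any request.
  val filter = PriorityEncoderOH(Cat(io.req, io.req & ~mask))
  val sel = filter(n - 1, 0) | (filter >> n)
  io.grant := sel
  when (io.fire && io.req.orR) {
    // Mask everything at or below the granted index for the next round.
    mask := Mux1H(sel, (0 until n).map { w => ~(0.U((w + 1).W)) })
  }
}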
Generate the Verilog code corresponding to the following Chisel files. File ListBuffer.scala: /* * Copyright 2019 SiFive, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You should have received a copy of LICENSE.Apache2 along with * this software. If not, you may obtain a copy at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package sifive.blocks.inclusivecache import chisel3._ import chisel3.util._ import freechips.rocketchip.util._ case class ListBufferParameters[T <: Data](gen: T, queues: Int, entries: Int, bypass: Boolean) { val queueBits = log2Up(queues) val entryBits = log2Up(entries) } class ListBufferPush[T <: Data](params: ListBufferParameters[T]) extends Bundle { val index = UInt(params.queueBits.W) val data = Output(params.gen) } class ListBuffer[T <: Data](params: ListBufferParameters[T]) extends Module { override def desiredName = s"ListBuffer_${params.gen.typeName}_q${params.queues}_e${params.entries}" val io = IO(new Bundle { // push is visible on the same cycle; flow queues val push = Flipped(Decoupled(new ListBufferPush(params))) val valid = UInt(params.queues.W) val pop = Flipped(Valid(UInt(params.queueBits.W))) val data = Output(params.gen) }) val valid = RegInit(0.U(params.queues.W)) val head = Mem(params.queues, UInt(params.entryBits.W)) val tail = Mem(params.queues, UInt(params.entryBits.W)) val used = RegInit(0.U(params.entries.W)) val next = Mem(params.entries, UInt(params.entryBits.W)) val data = Mem(params.entries, params.gen) val freeOH = ~(leftOR(~used) << 1) & ~used val freeIdx = OHToUInt(freeOH) val valid_set = WireDefault(0.U(params.queues.W)) val valid_clr = WireDefault(0.U(params.queues.W)) val used_set = WireDefault(0.U(params.entries.W)) val used_clr = WireDefault(0.U(params.entries.W)) val push_tail = tail.read(io.push.bits.index) val push_valid = valid(io.push.bits.index) io.push.ready := !used.andR when (io.push.fire) { valid_set := UIntToOH(io.push.bits.index, params.queues) used_set := freeOH data.write(freeIdx, io.push.bits.data) when (push_valid) { next.write(push_tail, freeIdx) } .otherwise { head.write(io.push.bits.index, freeIdx) } tail.write(io.push.bits.index, freeIdx) } val pop_head = head.read(io.pop.bits) val pop_valid = valid(io.pop.bits) // Bypass push data to the peek port io.data := (if (!params.bypass) data.read(pop_head) else Mux(!pop_valid, io.push.bits.data, data.read(pop_head))) io.valid := (if (!params.bypass) valid else (valid | valid_set)) // It is an error to pop something that is not valid assert (!io.pop.fire || (io.valid)(io.pop.bits)) when (io.pop.fire) { used_clr := UIntToOH(pop_head, params.entries) when (pop_head === tail.read(io.pop.bits)) { valid_clr := UIntToOH(io.pop.bits, params.queues) } head.write(io.pop.bits, Mux(io.push.fire && push_valid && push_tail === pop_head, freeIdx, next.read(pop_head))) } // Empty bypass changes no state when ((!params.bypass).B || !io.pop.valid || pop_valid) { used := (used & ~used_clr) | used_set valid := (valid & ~valid_clr) | valid_set } }
module ListBuffer_PutBufferCEntry_q2_e8_3( // @[ListBuffer.scala:36:7] input clock, // @[ListBuffer.scala:36:7] input reset, // @[ListBuffer.scala:36:7] output io_push_ready, // @[ListBuffer.scala:39:14] input io_push_valid, // @[ListBuffer.scala:39:14] input io_push_bits_index, // @[ListBuffer.scala:39:14] input [127:0] io_push_bits_data_data, // @[ListBuffer.scala:39:14] input io_push_bits_data_corrupt, // @[ListBuffer.scala:39:14] output [1:0] io_valid, // @[ListBuffer.scala:39:14] input io_pop_valid, // @[ListBuffer.scala:39:14] input io_pop_bits, // @[ListBuffer.scala:39:14] output [127:0] io_data_data, // @[ListBuffer.scala:39:14] output io_data_corrupt // @[ListBuffer.scala:39:14] ); wire [128:0] _data_ext_R0_data; // @[ListBuffer.scala:52:18] wire [2:0] _next_ext_R0_data; // @[ListBuffer.scala:51:18] wire [2:0] _tail_ext_R0_data; // @[ListBuffer.scala:49:18] wire [2:0] _tail_ext_R1_data; // @[ListBuffer.scala:49:18] wire [2:0] _head_ext_R0_data; // @[ListBuffer.scala:48:18] wire io_push_valid_0 = io_push_valid; // @[ListBuffer.scala:36:7] wire io_push_bits_index_0 = io_push_bits_index; // @[ListBuffer.scala:36:7] wire [127:0] io_push_bits_data_data_0 = io_push_bits_data_data; // @[ListBuffer.scala:36:7] wire io_push_bits_data_corrupt_0 = io_push_bits_data_corrupt; // @[ListBuffer.scala:36:7] wire io_pop_valid_0 = io_pop_valid; // @[ListBuffer.scala:36:7] wire io_pop_bits_0 = io_pop_bits; // @[ListBuffer.scala:36:7] wire _io_push_ready_T_1; // @[ListBuffer.scala:65:20] wire valid_set_shiftAmount = io_push_bits_index_0; // @[OneHot.scala:64:49] wire valid_clr_shiftAmount = io_pop_bits_0; // @[OneHot.scala:64:49] wire io_push_ready_0; // @[ListBuffer.scala:36:7] wire [127:0] io_data_data_0; // @[ListBuffer.scala:36:7] wire io_data_corrupt_0; // @[ListBuffer.scala:36:7] wire [1:0] io_valid_0; // @[ListBuffer.scala:36:7] reg [1:0] valid; // @[ListBuffer.scala:47:22] assign io_valid_0 = valid; // @[ListBuffer.scala:36:7, :47:22] reg [7:0] used; // @[ListBuffer.scala:50:22] assign io_data_data_0 = _data_ext_R0_data[127:0]; // @[ListBuffer.scala:36:7, :52:18] assign io_data_corrupt_0 = _data_ext_R0_data[128]; // @[ListBuffer.scala:36:7, :52:18] wire [7:0] _freeOH_T = ~used; // @[ListBuffer.scala:50:22, :54:25] wire [8:0] _freeOH_T_1 = {_freeOH_T, 1'h0}; // @[package.scala:253:48] wire [7:0] _freeOH_T_2 = _freeOH_T_1[7:0]; // @[package.scala:253:{48,53}] wire [7:0] _freeOH_T_3 = _freeOH_T | _freeOH_T_2; // @[package.scala:253:{43,53}] wire [9:0] _freeOH_T_4 = {_freeOH_T_3, 2'h0}; // @[package.scala:253:{43,48}] wire [7:0] _freeOH_T_5 = _freeOH_T_4[7:0]; // @[package.scala:253:{48,53}] wire [7:0] _freeOH_T_6 = _freeOH_T_3 | _freeOH_T_5; // @[package.scala:253:{43,53}] wire [11:0] _freeOH_T_7 = {_freeOH_T_6, 4'h0}; // @[package.scala:253:{43,48}] wire [7:0] _freeOH_T_8 = _freeOH_T_7[7:0]; // @[package.scala:253:{48,53}] wire [7:0] _freeOH_T_9 = _freeOH_T_6 | _freeOH_T_8; // @[package.scala:253:{43,53}] wire [7:0] _freeOH_T_10 = _freeOH_T_9; // @[package.scala:253:43, :254:17] wire [8:0] _freeOH_T_11 = {_freeOH_T_10, 1'h0}; // @[package.scala:254:17] wire [8:0] _freeOH_T_12 = ~_freeOH_T_11; // @[ListBuffer.scala:54:{16,32}] wire [7:0] _freeOH_T_13 = ~used; // @[ListBuffer.scala:50:22, :54:{25,40}] wire [8:0] freeOH = {1'h0, _freeOH_T_12[7:0] & _freeOH_T_13}; // @[ListBuffer.scala:54:{16,38,40}] wire freeIdx_hi = freeOH[8]; // @[OneHot.scala:30:18] wire _freeIdx_T = freeIdx_hi; // @[OneHot.scala:30:18, :32:14] wire [7:0] freeIdx_lo = freeOH[7:0]; // @[OneHot.scala:31:18] wire [7:0] _freeIdx_T_1 
= {7'h0, freeIdx_hi} | freeIdx_lo; // @[OneHot.scala:30:18, :31:18, :32:28] wire [3:0] freeIdx_hi_1 = _freeIdx_T_1[7:4]; // @[OneHot.scala:30:18, :32:28] wire [3:0] freeIdx_lo_1 = _freeIdx_T_1[3:0]; // @[OneHot.scala:31:18, :32:28] wire _freeIdx_T_2 = |freeIdx_hi_1; // @[OneHot.scala:30:18, :32:14] wire [3:0] _freeIdx_T_3 = freeIdx_hi_1 | freeIdx_lo_1; // @[OneHot.scala:30:18, :31:18, :32:28] wire [1:0] freeIdx_hi_2 = _freeIdx_T_3[3:2]; // @[OneHot.scala:30:18, :32:28] wire [1:0] freeIdx_lo_2 = _freeIdx_T_3[1:0]; // @[OneHot.scala:31:18, :32:28] wire _freeIdx_T_4 = |freeIdx_hi_2; // @[OneHot.scala:30:18, :32:14] wire [1:0] _freeIdx_T_5 = freeIdx_hi_2 | freeIdx_lo_2; // @[OneHot.scala:30:18, :31:18, :32:28] wire _freeIdx_T_6 = _freeIdx_T_5[1]; // @[OneHot.scala:32:28] wire [1:0] _freeIdx_T_7 = {_freeIdx_T_4, _freeIdx_T_6}; // @[OneHot.scala:32:{10,14}] wire [2:0] _freeIdx_T_8 = {_freeIdx_T_2, _freeIdx_T_7}; // @[OneHot.scala:32:{10,14}] wire [3:0] freeIdx = {_freeIdx_T, _freeIdx_T_8}; // @[OneHot.scala:32:{10,14}] wire [1:0] valid_set; // @[ListBuffer.scala:57:30] wire [1:0] valid_clr; // @[ListBuffer.scala:58:30] wire [7:0] used_set; // @[ListBuffer.scala:59:30] wire [7:0] used_clr; // @[ListBuffer.scala:60:30] wire [1:0] _push_valid_T = valid >> io_push_bits_index_0; // @[ListBuffer.scala:36:7, :47:22, :63:25] wire push_valid = _push_valid_T[0]; // @[ListBuffer.scala:63:25] wire _io_push_ready_T = &used; // @[ListBuffer.scala:50:22, :65:26] assign _io_push_ready_T_1 = ~_io_push_ready_T; // @[ListBuffer.scala:65:{20,26}] assign io_push_ready_0 = _io_push_ready_T_1; // @[ListBuffer.scala:36:7, :65:20] wire data_MPORT_en = io_push_ready_0 & io_push_valid_0; // @[Decoupled.scala:51:35] wire [1:0] _valid_set_T = 2'h1 << valid_set_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [1:0] _valid_set_T_1 = _valid_set_T; // @[OneHot.scala:65:{12,27}] assign valid_set = data_MPORT_en ? _valid_set_T_1 : 2'h0; // @[OneHot.scala:65:27] assign used_set = data_MPORT_en ? freeIdx_lo : 8'h0; // @[OneHot.scala:31:18] wire [1:0] _GEN = {1'h0, io_pop_bits_0}; // @[ListBuffer.scala:36:7, :79:24] wire [1:0] _pop_valid_T = valid >> _GEN; // @[ListBuffer.scala:47:22, :79:24] wire pop_valid = _pop_valid_T[0]; // @[ListBuffer.scala:79:24]
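Note: the _q2_e8 in the module name above comes from desiredName in ListBuffer.scala, i.e. ListBufferParameters with queues = 2 and entries = 8, and the gen type is a PutBufferCEntry bundle (128-bit data plus a corrupt flag, matching the io_push_bits_data_* ports). A minimal, hypothetical instantiation sketch against the interface defined in the source (the wrapper module, its port names, the bypass = false choice, and the plain UInt payload standing in for the real bundle are all invented for illustration):

import chisel3._
import chisel3.util._
import sifive.blocks.inclusivecache.{ListBuffer, ListBufferParameters}

class ListBufferUserSketch extends Module {
  val io = IO(new Bundle {
    val in = Flipped(Decoupled(UInt(8.W)))
    val which = Input(UInt(1.W)) // target queue; 2 queues -> 1 index bit
    val pop = Flipped(Valid(UInt(1.W)))
    val out = Output(UInt(8.W))
    val valid = Output(UInt(2.W)) // per-queue non-empty flags
  })
  // 2 logical linked-list queues sharing 8 storage entries.
  val buf = Module(new ListBuffer(ListBufferParameters(UInt(8.W), queues = 2, entries = 8, bypass = false)))
  buf.io.push.valid := io.in.valid
  buf.io.push.bits.index := io.which
  buf.io.push.bits.data := io.in.bits
  io.in.ready := buf.io.push.ready
  buf.io.pop := io.pop
  io.out := buf.io.data // data at the head of the queue named by io.pop.bits
  io.valid := buf.io.valid
}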
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_435( // @[SynchronizerReg.scala:68:19]
  input  clock, // @[SynchronizerReg.scala:68:19]
  input  reset, // @[SynchronizerReg.scala:68:19]
  input  io_d, // @[ShiftReg.scala:36:14]
  output io_q // @[ShiftReg.scala:36:14]
);

  wire io_d_0 = io_d; // @[SynchronizerReg.scala:68:19]
  wire _sync_2_T = io_d_0; // @[SynchronizerReg.scala:54:22, :68:19]
  wire io_q_0; // @[SynchronizerReg.scala:68:19]
  reg  sync_0; // @[SynchronizerReg.scala:51:87]
  assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19]
  reg  sync_1; // @[SynchronizerReg.scala:51:87]
  reg  sync_2; // @[SynchronizerReg.scala:51:87]
  always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19]
    if (reset) begin // @[SynchronizerReg.scala:68:19]
      sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87]
      sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87]
      sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87]
    end
    else begin // @[SynchronizerReg.scala:68:19]
      sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87]
      sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87]
      sync_2 <= _sync_2_T; // @[SynchronizerReg.scala:51:87, :54:22]
    end
  end // always @(posedge, posedge)
  assign io_q = io_q_0; // @[SynchronizerReg.scala:68:19]
endmodule
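A minimal usage sketch of the AsyncResetSynchronizerShiftReg helper listed above; the wrapper module, port names, and width are illustrative assumptions and are not part of the prompt's files or of the generated output.

import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

// Illustrative wrapper (not part of the original files).
class SyncExample extends Module {
  val io = IO(new Bundle {
    val async_in = Input(Bool())   // signal arriving from another clock domain
    val synced   = Output(Bool())  // signal synchronized into this module's clock domain
  })
  // Three-deep, asynchronously reset synchronizer chain initialized to 0; per bit this
  // elaborates to a primitive like the AsyncResetSynchronizerPrimitiveShiftReg_d3_i0 above.
  io.synced := AsyncResetSynchronizerShiftReg(io.async_in, sync = 3, init = 0, name = Some("in_sync"))
}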
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_45( // @[SynchronizerReg.scala:68:19]
  input  clock, // @[SynchronizerReg.scala:68:19]
  input  reset, // @[SynchronizerReg.scala:68:19]
  input  io_d, // @[ShiftReg.scala:36:14]
  output io_q // @[ShiftReg.scala:36:14]
);

  wire io_d_0 = io_d; // @[SynchronizerReg.scala:68:19]
  wire _sync_2_T = io_d_0; // @[SynchronizerReg.scala:54:22, :68:19]
  wire io_q_0; // @[SynchronizerReg.scala:68:19]
  reg  sync_0; // @[SynchronizerReg.scala:51:87]
  assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19]
  reg  sync_1; // @[SynchronizerReg.scala:51:87]
  reg  sync_2; // @[SynchronizerReg.scala:51:87]
  always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19]
    if (reset) begin // @[SynchronizerReg.scala:68:19]
      sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87]
      sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87]
      sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87]
    end
    else begin // @[SynchronizerReg.scala:68:19]
      sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87]
      sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87]
      sync_2 <= _sync_2_T; // @[SynchronizerReg.scala:51:87, :54:22]
    end
  end // always @(posedge, posedge)
  assign io_q = io_q_0; // @[SynchronizerReg.scala:68:19]
endmodule
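A minimal usage sketch of the ClockCrossingReg helper from the SynchronizerReg.scala listing above; the wrapper module, bus width, and register name are illustrative assumptions rather than part of the prompt's files.

import chisel3._
import freechips.rocketchip.util.ClockCrossingReg

// Illustrative wrapper (not part of the original files).
class CaptureExample extends Module {
  val io = IO(new Bundle {
    val d  = Input(UInt(8.W))
    val en = Input(Bool())
    val q  = Output(UInt(8.W))
  })
  // Single-deep capture register for a bus crossing clock domains; doInit = false
  // means the underlying RegEnable has no reset value.
  io.q := ClockCrossingReg(io.d, en = io.en, doInit = false, name = Some("capture_reg"))
}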
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File AsyncQueue.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ case class AsyncQueueParams( depth: Int = 8, sync: Int = 3, safe: Boolean = true, // If safe is true, then effort is made to resynchronize the crossing indices when either side is reset. // This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty. narrow: Boolean = false) // If narrow is true then the read mux is moved to the source side of the crossing. // This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing, // at the expense of a combinational path from the sink to the source and back to the sink. { require (depth > 0 && isPow2(depth)) require (sync >= 2) val bits = log2Ceil(depth) val wires = if (narrow) 1 else depth } object AsyncQueueParams { // When there is only one entry, we don't need narrow. 
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false) } class AsyncBundleSafety extends Bundle { val ridx_valid = Input (Bool()) val widx_valid = Output(Bool()) val source_reset_n = Output(Bool()) val sink_reset_n = Input (Bool()) } class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle { // Data-path synchronization val mem = Output(Vec(params.wires, gen)) val ridx = Input (UInt((params.bits+1).W)) val widx = Output(UInt((params.bits+1).W)) val index = params.narrow.option(Input(UInt(params.bits.W))) // Signals used to self-stabilize a safe AsyncQueue val safe = params.safe.option(new AsyncBundleSafety) } object GrayCounter { def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = { val incremented = Wire(UInt(bits.W)) val binary = RegNext(next=incremented, init=0.U).suggestName(name) incremented := Mux(clear, 0.U, binary + increment.asUInt) incremented ^ (incremented >> 1) } } class AsyncValidSync(sync: Int, desc: String) extends RawModule { val io = IO(new Bundle { val in = Input(Bool()) val out = Output(Bool()) }) val clock = IO(Input(Clock())) val reset = IO(Input(AsyncReset())) withClockAndReset(clock, reset){ io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc)) } } class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSource_${gen.typeName}" val io = IO(new Bundle { // These come from the source domain val enq = Flipped(Decoupled(gen)) // These cross to the sink clock domain val async = new AsyncBundle(gen, params) }) val bits = params.bits val sink_ready = WireInit(true.B) val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all. 
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin")) val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray")) val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U) val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1)) when (io.enq.fire) { mem(index) := io.enq.bits } val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg")) io.enq.ready := ready_reg && sink_ready val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray")) io.async.widx := widx_reg io.async.index match { case Some(index) => io.async.mem(0) := mem(index) case None => io.async.mem := mem } io.async.safe.foreach { sio => val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0")) val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1")) val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend")) val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid")) source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_valid .reset := reset.asAsyncReset source_valid_0.clock := clock source_valid_1.clock := clock sink_extend .clock := clock sink_valid .clock := clock source_valid_0.io.in := true.B source_valid_1.io.in := source_valid_0.io.out sio.widx_valid := source_valid_1.io.out sink_extend.io.in := sio.ridx_valid sink_valid.io.in := sink_extend.io.out sink_ready := sink_valid.io.out sio.source_reset_n := !reset.asBool // Assert that if there is stuff in the queue, then reset cannot happen // Impossible to write because dequeue can occur on the receiving side, // then reset allowed to happen, but write side cannot know that dequeue // occurred. 
// TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected") // assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty") } } class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSink_${gen.typeName}" val io = IO(new Bundle { // These come from the sink domain val deq = Decoupled(gen) // These cross to the source clock domain val async = Flipped(new AsyncBundle(gen, params)) }) val bits = params.bits val source_ready = WireInit(true.B) val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin")) val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray")) val valid = source_ready && ridx =/= widx // The mux is safe because timing analysis ensures ridx has reached the register // On an ASIC, changes to the unread location cannot affect the selected value // On an FPGA, only one input changes at a time => mem updates don't cause glitches // The register only latches when the selected valued is not being written val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1)) io.async.index.foreach { _ := index } // This register does not NEED to be reset, as its contents will not // be considered unless the asynchronously reset deq valid register is set. // It is possible that bits latches when the source domain is reset / has power cut // This is safe, because isolation gates brought mem low before the zeroed widx reached us val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index) io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg")) val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg")) io.deq.valid := valid_reg && source_ready val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray")) io.async.ridx := ridx_reg io.async.safe.foreach { sio => val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0")) val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1")) val source_extend = Module(new AsyncValidSync(params.sync, "source_extend")) val source_valid = Module(new AsyncValidSync(params.sync, "source_valid")) sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_valid .reset := reset.asAsyncReset sink_valid_0 .clock := clock sink_valid_1 .clock := clock source_extend.clock := clock source_valid .clock := clock sink_valid_0.io.in := true.B sink_valid_1.io.in := sink_valid_0.io.out sio.ridx_valid := sink_valid_1.io.out source_extend.io.in := sio.widx_valid source_valid.io.in := source_extend.io.out source_ready := source_valid.io.out sio.sink_reset_n := !reset.asBool // TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // // val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool // val reset_and_extend_prev = RegNext(reset_and_extend, true.B) // val reset_rise = !reset_and_extend_prev && reset_and_extend // val prev_idx_match = 
AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0) // assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty") } } object FromAsyncBundle { // Sometimes it makes sense for the sink to have different sync than the source def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync) def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = { val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync))) sink.io.async <> x sink.io.deq } } object ToAsyncBundle { def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = { val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params)) source.io.enq <> x source.io.async } } class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] { val io = IO(new CrossingIO(gen)) val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) } val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) } source.io.enq <> io.enq io.deq <> sink.io.deq sink.io.async <> source.io.async }
module AsyncValidSync_47( // @[AsyncQueue.scala:58:7] input io_in, // @[AsyncQueue.scala:59:14] output io_out, // @[AsyncQueue.scala:59:14] input clock, // @[AsyncQueue.scala:63:17] input reset // @[AsyncQueue.scala:64:17] ); wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7] wire _io_out_WIRE; // @[ShiftReg.scala:48:24] wire io_out_0; // @[AsyncQueue.scala:58:7] assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24] AsyncResetSynchronizerShiftReg_w1_d3_i0_64 io_out_source_valid ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (reset), .io_d (io_in_0), // @[AsyncQueue.scala:58:7] .io_q (_io_out_WIRE) ); // @[ShiftReg.scala:45:23] assign io_out = io_out_0; // @[AsyncQueue.scala:58:7] endmodule
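A minimal sketch showing how ToAsyncBundle and FromAsyncBundle from the AsyncQueue.scala listing above can be paired to move a Decoupled stream between two clock domains; the wrapper RawModule, port names, payload width, and queue parameters are assumptions chosen for illustration.

import chisel3._
import chisel3.util._
import freechips.rocketchip.util._

// Illustrative wrapper (not part of the original files).
class CrossingExample extends RawModule {
  val enq_clock = IO(Input(Clock()))
  val enq_reset = IO(Input(Bool()))
  val deq_clock = IO(Input(Clock()))
  val deq_reset = IO(Input(Bool()))
  val enq = IO(Flipped(Decoupled(UInt(8.W))))
  val deq = IO(Decoupled(UInt(8.W)))

  // The source half (AsyncQueueSource) is elaborated in the enqueue clock domain...
  val crossing = withClockAndReset(enq_clock, enq_reset) {
    ToAsyncBundle(enq, AsyncQueueParams(depth = 8, sync = 3))
  }
  // ...and the sink half (AsyncQueueSink) in the dequeue clock domain; only the
  // AsyncBundle wires cross between the two domains.
  deq <> withClockAndReset(deq_clock, deq_reset) {
    FromAsyncBundle(crossing)
  }
}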
Generate the Verilog code corresponding to the following Chisel files. File PE.scala: // See README.md for license details. package gemmini import chisel3._ import chisel3.util._ class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle { val dataflow = UInt(1.W) // TODO make this an Enum val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)? val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats } class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module { import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(inputType) val in_c = Input(cType) val out_d = Output(dType) }) io.out_d := io.in_c.mac(io.in_a, io.in_b) } // TODO update documentation /** * A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh. * @param width Data width of operands */ class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int) (implicit ev: Arithmetic[T]) extends Module { // Debugging variables import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(outputType) val in_d = Input(outputType) val out_a = Output(inputType) val out_b = Output(outputType) val out_c = Output(outputType) val in_control = Input(new PEControl(accType)) val out_control = Output(new PEControl(accType)) val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W)) val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W)) val in_last = Input(Bool()) val out_last = Output(Bool()) val in_valid = Input(Bool()) val out_valid = Output(Bool()) val bad_dataflow = Output(Bool()) }) val cType = if (df == Dataflow.WS) inputType else accType // When creating PEs that support multiple dataflows, the // elaboration/synthesis tools often fail to consolidate and de-duplicate // MAC units. To force mac circuitry to be re-used, we create a "mac_unit" // module here which just performs a single MAC operation val mac_unit = Module(new MacUnit(inputType, if (df == Dataflow.WS) outputType else accType, outputType)) val a = io.in_a val b = io.in_b val d = io.in_d val c1 = Reg(cType) val c2 = Reg(cType) val dataflow = io.in_control.dataflow val prop = io.in_control.propagate val shift = io.in_control.shift val id = io.in_id val last = io.in_last val valid = io.in_valid io.out_a := a io.out_control.dataflow := dataflow io.out_control.propagate := prop io.out_control.shift := shift io.out_id := id io.out_last := last io.out_valid := valid mac_unit.io.in_a := a val last_s = RegEnable(prop, valid) val flip = last_s =/= prop val shift_offset = Mux(flip, shift, 0.U) // Which dataflow are we using? val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W) val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W) // Is c1 being computed on, or propagated forward (in the output-stationary dataflow)? 
val COMPUTE = 0.U(1.W) val PROPAGATE = 1.U(1.W) io.bad_dataflow := false.B when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 c2 := mac_unit.io.out_d c1 := d.withWidthOf(cType) }.otherwise { io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c1 c1 := mac_unit.io.out_d c2 := d.withWidthOf(cType) } }.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := c1 mac_unit.io.in_b := c2.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c1 := d }.otherwise { io.out_c := c2 mac_unit.io.in_b := c1.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c2 := d } }.otherwise { io.bad_dataflow := true.B //assert(false.B, "unknown dataflow") io.out_c := DontCare io.out_b := DontCare mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 } when (!valid) { c1 := c1 c2 := c2 mac_unit.io.in_b := DontCare mac_unit.io.in_c := DontCare } } File Arithmetic.scala: // A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own: // implicit MyTypeArithmetic extends Arithmetic[MyType] { ... } package gemmini import chisel3._ import chisel3.util._ import hardfloat._ // Bundles that represent the raw bits of custom datatypes case class Float(expWidth: Int, sigWidth: Int) extends Bundle { val bits = UInt((expWidth + sigWidth).W) val bias: Int = (1 << (expWidth-1)) - 1 } case class DummySInt(w: Int) extends Bundle { val bits = UInt(w.W) def dontCare: DummySInt = { val o = Wire(new DummySInt(w)) o.bits := 0.U o } } // The Arithmetic typeclass which implements various arithmetic operations on custom datatypes abstract class Arithmetic[T <: Data] { implicit def cast(t: T): ArithmeticOps[T] } abstract class ArithmeticOps[T <: Data](self: T) { def *(t: T): T def mac(m1: T, m2: T): T // Returns (m1 * m2 + self) def +(t: T): T def -(t: T): T def >>(u: UInt): T // This is a rounding shift! Rounds away from 0 def >(t: T): Bool def identity: T def withWidthOf(t: T): T def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates def relu: T def zero: T def minimum: T // Optional parameters, which only need to be defined if you want to enable various optimizations for transformers def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None def mult_with_reciprocal[U <: Data](reciprocal: U) = self } object Arithmetic { implicit object UIntArithmetic extends Arithmetic[UInt] { override implicit def cast(self: UInt) = new ArithmeticOps(self) { override def *(t: UInt) = self * t override def mac(m1: UInt, m2: UInt) = m1 * m2 + self override def +(t: UInt) = self + t override def -(t: UInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? 
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = point_five & (zeros | ones_digit) (self >> u).asUInt + r } override def >(t: UInt): Bool = self > t override def withWidthOf(t: UInt) = self.asTypeOf(t) override def clippedToWidthOf(t: UInt) = { val sat = ((1 << (t.getWidth-1))-1).U Mux(self > sat, sat, self)(t.getWidth-1, 0) } override def relu: UInt = self override def zero: UInt = 0.U override def identity: UInt = 1.U override def minimum: UInt = 0.U } } implicit object SIntArithmetic extends Arithmetic[SInt] { override implicit def cast(self: SInt) = new ArithmeticOps(self) { override def *(t: SInt) = self * t override def mac(m1: SInt, m2: SInt) = m1 * m2 + self override def +(t: SInt) = self + t override def -(t: SInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = (point_five & (zeros | ones_digit)).asBool (self >> u).asSInt + Mux(r, 1.S, 0.S) } override def >(t: SInt): Bool = self > t override def withWidthOf(t: SInt) = { if (self.getWidth >= t.getWidth) self(t.getWidth-1, 0).asSInt else { val sign_bits = t.getWidth - self.getWidth val sign = self(self.getWidth-1) Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t) } } override def clippedToWidthOf(t: SInt): SInt = { val maxsat = ((1 << (t.getWidth-1))-1).S val minsat = (-(1 << (t.getWidth-1))).S MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt } override def relu: SInt = Mux(self >= 0.S, self, 0.S) override def zero: SInt = 0.S override def identity: SInt = 1.S override def minimum: SInt = (-(1 << (self.getWidth-1))).S override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(denom_t.cloneType)) val output = Wire(Decoupled(self.cloneType)) // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def sin_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def uin_to_float(x: UInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := x in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = 
sin_to_float(self) val denom_rec = uin_to_float(input.bits) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := self_rec divider.io.b := denom_rec divider.io.roundingMode := consts.round_minMag divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := float_to_in(divider.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(self.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) // Instantiate the hardloat sqrt val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0)) input.ready := sqrter.io.inReady sqrter.io.inValid := input.valid sqrter.io.sqrtOp := true.B sqrter.io.a := self_rec sqrter.io.b := DontCare sqrter.io.roundingMode := consts.round_minMag sqrter.io.detectTininess := consts.tininess_afterRounding output.valid := sqrter.io.outValid_sqrt output.bits := float_to_in(sqrter.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match { case Float(expWidth, sigWidth) => val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(u.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } val self_rec = in_to_float(self) val one_rec = in_to_float(1.S) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := one_rec divider.io.b := self_rec divider.io.roundingMode := consts.round_near_even divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u) assert(!output.valid || output.ready) Some((input, output)) case _ => None } override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match 
{ case recip @ Float(expWidth, sigWidth) => def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits) // Instantiate the hardloat divider val muladder = Module(new MulRecFN(expWidth, sigWidth)) muladder.io.roundingMode := consts.round_near_even muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := reciprocal_rec float_to_in(muladder.io.out) case _ => self } } } implicit object FloatArithmetic extends Arithmetic[Float] { // TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) { override def *(t: Float): Float = { val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := t_rec_resized val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def mac(m1: Float, m2: Float): Float = { // Recode all operands val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits) val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize m1 to self's width val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth)) m1_resizer.io.in := m1_rec m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m1_resizer.io.detectTininess := consts.tininess_afterRounding val m1_rec_resized = m1_resizer.io.out // Resize m2 to self's width val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth)) m2_resizer.io.in := m2_rec m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m2_resizer.io.detectTininess := consts.tininess_afterRounding val m2_rec_resized = m2_resizer.io.out // Perform multiply-add val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := m1_rec_resized muladder.io.b := m2_rec_resized muladder.io.c := self_rec // Convert result 
to standard format // TODO remove these intermediate recodings val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def +(t: Float): Float = { require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Generate 1 as a float val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := 1.U in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding val one_rec = in_to_rec_fn.io.out // Resize t val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out // Perform addition val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := t_rec_resized muladder.io.b := one_rec muladder.io.c := self_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def -(t: Float): Float = { val t_sgn = t.bits(t.getWidth-1) val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t) self + neg_t } override def >>(u: UInt): Float = { // Recode self val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Get 2^(-u) as a recoded float val shift_exp = Wire(UInt(self.expWidth.W)) shift_exp := self.bias.U - u val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W)) val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn) assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported") // Multiply self and 2^(-u) val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := shift_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def >(t: Float): Bool = { // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize t to self's width val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth)) comparator.io.a := self_rec comparator.io.b := t_rec_resized comparator.io.signaling := false.B comparator.io.gt } override def withWidthOf(t: Float): Float = { val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even 
// consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def clippedToWidthOf(t: Float): Float = { // TODO check for overflow. Right now, we just assume that overflow doesn't happen val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def relu: Float = { val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits) val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits) result } override def zero: Float = 0.U.asTypeOf(self) override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) } } implicit object DummySIntArithmetic extends Arithmetic[DummySInt] { override implicit def cast(self: DummySInt) = new ArithmeticOps(self) { override def *(t: DummySInt) = self.dontCare override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare override def +(t: DummySInt) = self.dontCare override def -(t: DummySInt) = self.dontCare override def >>(t: UInt) = self.dontCare override def >(t: DummySInt): Bool = false.B override def identity = self.dontCare override def withWidthOf(t: DummySInt) = self.dontCare override def clippedToWidthOf(t: DummySInt) = self.dontCare override def relu = self.dontCare override def zero = self.dontCare override def minimum: DummySInt = self.dontCare } } }
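As the comment at the top of Arithmetic.scala suggests, the typeclass is brought into scope as an implicit conversion so that plain Chisel types gain operations such as relu and clippedToWidthOf. A minimal sketch of using the SInt instance directly; the module name, widths, and import path are assumptions for illustration.

import chisel3._
import gemmini.Arithmetic.SIntArithmetic.cast  // implicit SInt => ArithmeticOps[SInt]

// Illustrative wrapper (not part of the original files).
class ReluClipExample extends Module {
  val io = IO(new Bundle {
    val in  = Input(SInt(20.W))
    val out = Output(SInt(8.W))
  })
  // relu clamps negative values to zero; clippedToWidthOf then saturates the result
  // into the narrower 8-bit output type.
  io.out := io.in.relu.clippedToWidthOf(io.out)
}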
module MacUnit_79( // @[PE.scala:14:7]
  input         clock,    // @[PE.scala:14:7]
  input         reset,    // @[PE.scala:14:7]
  input  [7:0]  io_in_a,  // @[PE.scala:16:14]
  input  [7:0]  io_in_b,  // @[PE.scala:16:14]
  input  [19:0] io_in_c,  // @[PE.scala:16:14]
  output [19:0] io_out_d  // @[PE.scala:16:14]
);

  wire [7:0]  io_in_a_0 = io_in_a; // @[PE.scala:14:7]
  wire [7:0]  io_in_b_0 = io_in_b; // @[PE.scala:14:7]
  wire [19:0] io_in_c_0 = io_in_c; // @[PE.scala:14:7]
  wire [19:0] _io_out_d_T_3; // @[Arithmetic.scala:93:54]
  wire [19:0] io_out_d_0; // @[PE.scala:14:7]
  wire [15:0] _io_out_d_T = {{8{io_in_a_0[7]}}, io_in_a_0} * {{8{io_in_b_0[7]}}, io_in_b_0}; // @[PE.scala:14:7]
  wire [20:0] _io_out_d_T_1 = {{5{_io_out_d_T[15]}}, _io_out_d_T} + {io_in_c_0[19], io_in_c_0}; // @[PE.scala:14:7]
  wire [19:0] _io_out_d_T_2 = _io_out_d_T_1[19:0]; // @[Arithmetic.scala:93:54]
  assign _io_out_d_T_3 = _io_out_d_T_2; // @[Arithmetic.scala:93:54]
  assign io_out_d_0 = _io_out_d_T_3; // @[PE.scala:14:7]
  assign io_out_d = io_out_d_0; // @[PE.scala:14:7]
endmodule
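For orientation (an editorial sketch, not the original PE.scala): MacUnit_79 above is the flattened form of a signed multiply-accumulate — the 8x8 signed product is sign-extended, added to a 20-bit accumulator, and the low 20 bits are kept, matching the SInt `mac` path in the Arithmetic type class. A minimal Chisel equivalent under those assumed port names and widths:

import chisel3._

// Hedged sketch only: port names and widths are read off the Verilog above,
// not taken from the original PE.scala.
class MacUnitSketch extends Module {
  val io = IO(new Bundle {
    val in_a  = Input(SInt(8.W))
    val in_b  = Input(SInt(8.W))
    val in_c  = Input(SInt(20.W))
    val out_d = Output(SInt(20.W))
  })
  val prod = io.in_a * io.in_b    // 16-bit signed product (the sign-extended multiply in the Verilog)
  val sum  = prod +& io.in_c      // widened 21-bit sum (_io_out_d_T_1)
  io.out_d := sum(19, 0).asSInt   // keep the low 20 bits, as _io_out_d_T_2 does
}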
Generate the Verilog code corresponding to the following Chisel files. File ClockDomain.scala: package freechips.rocketchip.prci import chisel3._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ abstract class Domain(implicit p: Parameters) extends LazyModule with HasDomainCrossing { def clockBundle: ClockBundle lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { childClock := clockBundle.clock childReset := clockBundle.reset override def provideImplicitClockToLazyChildren = true // these are just for backwards compatibility with external devices // that were manually wiring themselves to the domain's clock/reset input: val clock = IO(Output(chiselTypeOf(clockBundle.clock))) val reset = IO(Output(chiselTypeOf(clockBundle.reset))) clock := clockBundle.clock reset := clockBundle.reset } } abstract class ClockDomain(implicit p: Parameters) extends Domain with HasClockDomainCrossing class ClockSinkDomain(val clockSinkParams: ClockSinkParameters)(implicit p: Parameters) extends ClockDomain { def this(take: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSinkParameters(take = take, name = name)) val clockNode = ClockSinkNode(Seq(clockSinkParams)) def clockBundle = clockNode.in.head._1 override lazy val desiredName = (clockSinkParams.name.toSeq :+ "ClockSinkDomain").mkString } class ClockSourceDomain(val clockSourceParams: ClockSourceParameters)(implicit p: Parameters) extends ClockDomain { def this(give: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSourceParameters(give = give, name = name)) val clockNode = ClockSourceNode(Seq(clockSourceParams)) def clockBundle = clockNode.out.head._1 override lazy val desiredName = (clockSourceParams.name.toSeq :+ "ClockSourceDomain").mkString } abstract class ResetDomain(implicit p: Parameters) extends Domain with HasResetDomainCrossing File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. 
*/ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. 
* * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. */ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File NoC.scala: package constellation.noc import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.{Field, Parameters} import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp, BundleBridgeSink, InModuleBody} import freechips.rocketchip.util.ElaborationArtefacts import freechips.rocketchip.prci._ import constellation.router._ import constellation.channel._ import constellation.routing.{RoutingRelation, ChannelRoutingInfo} import constellation.topology.{PhysicalTopology, UnidirectionalLine} class NoCTerminalIO( val ingressParams: Seq[IngressChannelParams], val egressParams: Seq[EgressChannelParams])(implicit val p: Parameters) extends Bundle { val ingress = MixedVec(ingressParams.map { u => Flipped(new IngressChannel(u)) }) val egress = MixedVec(egressParams.map { u => new EgressChannel(u) }) } class NoC(nocParams: NoCParams)(implicit p: Parameters) extends LazyModule { override def shouldBeInlined = nocParams.inlineNoC val internalParams = InternalNoCParams(nocParams) val allChannelParams = internalParams.channelParams val allIngressParams = internalParams.ingressParams val allEgressParams = internalParams.egressParams val allRouterParams = internalParams.routerParams val iP = p.alterPartial({ case InternalNoCKey => internalParams }) val nNodes = nocParams.topology.nNodes val nocName = nocParams.nocName val skipValidationChecks = nocParams.skipValidationChecks val clockSourceNodes = Seq.tabulate(nNodes) { i => ClockSourceNode(Seq(ClockSourceParameters())) } val router_sink_domains = Seq.tabulate(nNodes) { i => val router_sink_domain = LazyModule(new ClockSinkDomain(ClockSinkParameters( name = Some(s"${nocName}_router_$i") ))) router_sink_domain.clockNode := clockSourceNodes(i) router_sink_domain } val routers = Seq.tabulate(nNodes) { i => router_sink_domains(i) { val inParams = allChannelParams.filter(_.destId == i).map( _.copy(payloadBits=allRouterParams(i).user.payloadBits) ) val outParams = allChannelParams.filter(_.srcId == i).map( _.copy(payloadBits=allRouterParams(i).user.payloadBits) ) val ingressParams = allIngressParams.filter(_.destId == i).map( _.copy(payloadBits=allRouterParams(i).user.payloadBits) ) val egressParams = allEgressParams.filter(_.srcId == 
i).map( _.copy(payloadBits=allRouterParams(i).user.payloadBits) ) val noIn = inParams.size + ingressParams.size == 0 val noOut = outParams.size + egressParams.size == 0 if (noIn || noOut) { println(s"Constellation WARNING: $nocName router $i seems to be unused, it will not be generated") None } else { Some(LazyModule(new Router( routerParams = allRouterParams(i), preDiplomaticInParams = inParams, preDiplomaticIngressParams = ingressParams, outDests = outParams.map(_.destId), egressIds = egressParams.map(_.egressId) )(iP))) } }}.flatten val ingressNodes = allIngressParams.map { u => IngressChannelSourceNode(u.destId) } val egressNodes = allEgressParams.map { u => EgressChannelDestNode(u) } // Generate channels between routers diplomatically Seq.tabulate(nNodes, nNodes) { case (i, j) => if (i != j) { val routerI = routers.find(_.nodeId == i) val routerJ = routers.find(_.nodeId == j) if (routerI.isDefined && routerJ.isDefined) { val sourceNodes: Seq[ChannelSourceNode] = routerI.get.sourceNodes.filter(_.destId == j) val destNodes: Seq[ChannelDestNode] = routerJ.get.destNodes.filter(_.destParams.srcId == i) require (sourceNodes.size == destNodes.size) (sourceNodes zip destNodes).foreach { case (src, dst) => val channelParam = allChannelParams.find(c => c.srcId == i && c.destId == j).get router_sink_domains(j) { implicit val p: Parameters = iP (dst := ChannelWidthWidget(routerJ.get.payloadBits, routerI.get.payloadBits) := channelParam.channelGen(p)(src) ) } } } }} // Generate terminal channels diplomatically routers.foreach { dst => router_sink_domains(dst.nodeId) { implicit val p: Parameters = iP dst.ingressNodes.foreach(n => { val ingressId = n.destParams.ingressId require(dst.payloadBits <= allIngressParams(ingressId).payloadBits) (n := IngressWidthWidget(dst.payloadBits, allIngressParams(ingressId).payloadBits) := ingressNodes(ingressId) ) }) dst.egressNodes.foreach(n => { val egressId = n.egressId require(dst.payloadBits <= allEgressParams(egressId).payloadBits) (egressNodes(egressId) := EgressWidthWidget(allEgressParams(egressId).payloadBits, dst.payloadBits) := n ) }) }} val debugNodes = routers.map { r => val sink = BundleBridgeSink[DebugBundle]() sink := r.debugNode sink } val ctrlNodes = if (nocParams.hasCtrl) { (0 until nNodes).map { i => routers.find(_.nodeId == i).map { r => val sink = BundleBridgeSink[RouterCtrlBundle]() sink := r.ctrlNode.get sink } } } else { Nil } println(s"Constellation: $nocName Finished parameter validation") lazy val module = new Impl class Impl extends LazyModuleImp(this) { println(s"Constellation: $nocName Starting NoC RTL generation") val io = IO(new NoCTerminalIO(allIngressParams, allEgressParams)(iP) { val router_clocks = Vec(nNodes, Input(new ClockBundle(ClockBundleParameters()))) val router_ctrl = if (nocParams.hasCtrl) Vec(nNodes, new RouterCtrlBundle) else Nil }) (io.ingress zip ingressNodes.map(_.out(0)._1)).foreach { case (l,r) => r <> l } (io.egress zip egressNodes .map(_.in (0)._1)).foreach { case (l,r) => l <> r } (io.router_clocks zip clockSourceNodes.map(_.out(0)._1)).foreach { case (l,r) => l <> r } if (nocParams.hasCtrl) { ctrlNodes.zipWithIndex.map { case (c,i) => if (c.isDefined) { io.router_ctrl(i) <> c.get.in(0)._1 } else { io.router_ctrl(i) <> DontCare } } } // TODO: These assume a single clock-domain across the entire noc val debug_va_stall_ctr = RegInit(0.U(64.W)) val debug_sa_stall_ctr = RegInit(0.U(64.W)) val debug_any_stall_ctr = debug_va_stall_ctr + debug_sa_stall_ctr debug_va_stall_ctr := debug_va_stall_ctr + 
debugNodes.map(_.in(0)._1.va_stall.reduce(_+_)).reduce(_+_) debug_sa_stall_ctr := debug_sa_stall_ctr + debugNodes.map(_.in(0)._1.sa_stall.reduce(_+_)).reduce(_+_) dontTouch(debug_va_stall_ctr) dontTouch(debug_sa_stall_ctr) dontTouch(debug_any_stall_ctr) def prepend(s: String) = Seq(nocName, s).mkString(".") ElaborationArtefacts.add(prepend("noc.graphml"), graphML) val adjList = routers.map { r => val outs = r.outParams.map(o => s"${o.destId}").mkString(" ") val egresses = r.egressParams.map(e => s"e${e.egressId}").mkString(" ") val ingresses = r.ingressParams.map(i => s"i${i.ingressId} ${r.nodeId}") (Seq(s"${r.nodeId} $outs $egresses") ++ ingresses).mkString("\n") }.mkString("\n") ElaborationArtefacts.add(prepend("noc.adjlist"), adjList) val xys = routers.map(r => { val n = r.nodeId val ids = (Seq(r.nodeId.toString) ++ r.egressParams.map(e => s"e${e.egressId}") ++ r.ingressParams.map(i => s"i${i.ingressId}") ) val plotter = nocParams.topology.plotter val coords = (Seq(plotter.node(r.nodeId)) ++ Seq.tabulate(r.egressParams.size ) { i => plotter. egress(i, r. egressParams.size, r.nodeId) } ++ Seq.tabulate(r.ingressParams.size) { i => plotter.ingress(i, r.ingressParams.size, r.nodeId) } ) (ids zip coords).map { case (i, (x, y)) => s"$i $x $y" }.mkString("\n") }).mkString("\n") ElaborationArtefacts.add(prepend("noc.xy"), xys) val edgeProps = routers.map { r => val outs = r.outParams.map { o => (Seq(s"${r.nodeId} ${o.destId}") ++ (if (o.possibleFlows.size == 0) Some("unused") else None)) .mkString(" ") } val egresses = r.egressParams.map { e => (Seq(s"${r.nodeId} e${e.egressId}") ++ (if (e.possibleFlows.size == 0) Some("unused") else None)) .mkString(" ") } val ingresses = r.ingressParams.map { i => (Seq(s"i${i.ingressId} ${r.nodeId}") ++ (if (i.possibleFlows.size == 0) Some("unused") else None)) .mkString(" ") } (outs ++ egresses ++ ingresses).mkString("\n") }.mkString("\n") ElaborationArtefacts.add(prepend("noc.edgeprops"), edgeProps) println(s"Constellation: $nocName Finished NoC RTL generation") } }
module TLSplitACDxBENoC_acd_router_13ClockSinkDomain( // @[ClockDomain.scala:14:9] output [1:0] auto_routers_debug_out_va_stall_0, // @[LazyModuleImp.scala:107:25] output [1:0] auto_routers_debug_out_va_stall_1, // @[LazyModuleImp.scala:107:25] output [1:0] auto_routers_debug_out_va_stall_2, // @[LazyModuleImp.scala:107:25] output [1:0] auto_routers_debug_out_va_stall_3, // @[LazyModuleImp.scala:107:25] output [1:0] auto_routers_debug_out_va_stall_4, // @[LazyModuleImp.scala:107:25] output [1:0] auto_routers_debug_out_sa_stall_0, // @[LazyModuleImp.scala:107:25] output [1:0] auto_routers_debug_out_sa_stall_1, // @[LazyModuleImp.scala:107:25] output [1:0] auto_routers_debug_out_sa_stall_2, // @[LazyModuleImp.scala:107:25] output [1:0] auto_routers_debug_out_sa_stall_3, // @[LazyModuleImp.scala:107:25] output [1:0] auto_routers_debug_out_sa_stall_4, // @[LazyModuleImp.scala:107:25] input auto_routers_egress_nodes_out_flit_ready, // @[LazyModuleImp.scala:107:25] output auto_routers_egress_nodes_out_flit_valid, // @[LazyModuleImp.scala:107:25] output auto_routers_egress_nodes_out_flit_bits_head, // @[LazyModuleImp.scala:107:25] output auto_routers_egress_nodes_out_flit_bits_tail, // @[LazyModuleImp.scala:107:25] output [144:0] auto_routers_egress_nodes_out_flit_bits_payload, // @[LazyModuleImp.scala:107:25] output auto_routers_ingress_nodes_in_1_flit_ready, // @[LazyModuleImp.scala:107:25] input auto_routers_ingress_nodes_in_1_flit_valid, // @[LazyModuleImp.scala:107:25] input auto_routers_ingress_nodes_in_1_flit_bits_head, // @[LazyModuleImp.scala:107:25] input auto_routers_ingress_nodes_in_1_flit_bits_tail, // @[LazyModuleImp.scala:107:25] input [144:0] auto_routers_ingress_nodes_in_1_flit_bits_payload, // @[LazyModuleImp.scala:107:25] input [4:0] auto_routers_ingress_nodes_in_1_flit_bits_egress_id, // @[LazyModuleImp.scala:107:25] output auto_routers_ingress_nodes_in_0_flit_ready, // @[LazyModuleImp.scala:107:25] input auto_routers_ingress_nodes_in_0_flit_valid, // @[LazyModuleImp.scala:107:25] input auto_routers_ingress_nodes_in_0_flit_bits_head, // @[LazyModuleImp.scala:107:25] input auto_routers_ingress_nodes_in_0_flit_bits_tail, // @[LazyModuleImp.scala:107:25] input [144:0] auto_routers_ingress_nodes_in_0_flit_bits_payload, // @[LazyModuleImp.scala:107:25] input [4:0] auto_routers_ingress_nodes_in_0_flit_bits_egress_id, // @[LazyModuleImp.scala:107:25] output auto_routers_source_nodes_out_2_flit_0_valid, // @[LazyModuleImp.scala:107:25] output auto_routers_source_nodes_out_2_flit_0_bits_head, // @[LazyModuleImp.scala:107:25] output auto_routers_source_nodes_out_2_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25] output [144:0] auto_routers_source_nodes_out_2_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25] output [1:0] auto_routers_source_nodes_out_2_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25] output [3:0] auto_routers_source_nodes_out_2_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25] output [2:0] auto_routers_source_nodes_out_2_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25] output [3:0] auto_routers_source_nodes_out_2_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25] output [1:0] auto_routers_source_nodes_out_2_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25] output [1:0] auto_routers_source_nodes_out_2_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25] input [2:0] auto_routers_source_nodes_out_2_credit_return, // @[LazyModuleImp.scala:107:25] input [2:0] 
auto_routers_source_nodes_out_2_vc_free, // @[LazyModuleImp.scala:107:25] output auto_routers_source_nodes_out_1_flit_0_valid, // @[LazyModuleImp.scala:107:25] output auto_routers_source_nodes_out_1_flit_0_bits_head, // @[LazyModuleImp.scala:107:25] output auto_routers_source_nodes_out_1_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25] output [144:0] auto_routers_source_nodes_out_1_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25] output [1:0] auto_routers_source_nodes_out_1_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25] output [3:0] auto_routers_source_nodes_out_1_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25] output [2:0] auto_routers_source_nodes_out_1_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25] output [3:0] auto_routers_source_nodes_out_1_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25] output [1:0] auto_routers_source_nodes_out_1_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25] output [1:0] auto_routers_source_nodes_out_1_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25] input [2:0] auto_routers_source_nodes_out_1_credit_return, // @[LazyModuleImp.scala:107:25] input [2:0] auto_routers_source_nodes_out_1_vc_free, // @[LazyModuleImp.scala:107:25] output auto_routers_source_nodes_out_0_flit_0_valid, // @[LazyModuleImp.scala:107:25] output auto_routers_source_nodes_out_0_flit_0_bits_head, // @[LazyModuleImp.scala:107:25] output auto_routers_source_nodes_out_0_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25] output [144:0] auto_routers_source_nodes_out_0_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25] output [1:0] auto_routers_source_nodes_out_0_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25] output [3:0] auto_routers_source_nodes_out_0_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25] output [2:0] auto_routers_source_nodes_out_0_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25] output [3:0] auto_routers_source_nodes_out_0_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25] output [1:0] auto_routers_source_nodes_out_0_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25] output [1:0] auto_routers_source_nodes_out_0_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25] input [2:0] auto_routers_source_nodes_out_0_credit_return, // @[LazyModuleImp.scala:107:25] input [2:0] auto_routers_source_nodes_out_0_vc_free, // @[LazyModuleImp.scala:107:25] input auto_routers_dest_nodes_in_2_flit_0_valid, // @[LazyModuleImp.scala:107:25] input auto_routers_dest_nodes_in_2_flit_0_bits_head, // @[LazyModuleImp.scala:107:25] input auto_routers_dest_nodes_in_2_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25] input [144:0] auto_routers_dest_nodes_in_2_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25] input [1:0] auto_routers_dest_nodes_in_2_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25] input [3:0] auto_routers_dest_nodes_in_2_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25] input [2:0] auto_routers_dest_nodes_in_2_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25] input [3:0] auto_routers_dest_nodes_in_2_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25] input [1:0] auto_routers_dest_nodes_in_2_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25] input [1:0] auto_routers_dest_nodes_in_2_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25] output [2:0] auto_routers_dest_nodes_in_2_credit_return, // @[LazyModuleImp.scala:107:25] output [2:0] 
auto_routers_dest_nodes_in_2_vc_free, // @[LazyModuleImp.scala:107:25] input auto_routers_dest_nodes_in_1_flit_0_valid, // @[LazyModuleImp.scala:107:25] input auto_routers_dest_nodes_in_1_flit_0_bits_head, // @[LazyModuleImp.scala:107:25] input auto_routers_dest_nodes_in_1_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25] input [144:0] auto_routers_dest_nodes_in_1_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25] input [1:0] auto_routers_dest_nodes_in_1_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25] input [3:0] auto_routers_dest_nodes_in_1_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25] input [2:0] auto_routers_dest_nodes_in_1_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25] input [3:0] auto_routers_dest_nodes_in_1_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25] input [1:0] auto_routers_dest_nodes_in_1_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25] input [1:0] auto_routers_dest_nodes_in_1_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25] output [2:0] auto_routers_dest_nodes_in_1_credit_return, // @[LazyModuleImp.scala:107:25] output [2:0] auto_routers_dest_nodes_in_1_vc_free, // @[LazyModuleImp.scala:107:25] input auto_routers_dest_nodes_in_0_flit_0_valid, // @[LazyModuleImp.scala:107:25] input auto_routers_dest_nodes_in_0_flit_0_bits_head, // @[LazyModuleImp.scala:107:25] input auto_routers_dest_nodes_in_0_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25] input [144:0] auto_routers_dest_nodes_in_0_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25] input [1:0] auto_routers_dest_nodes_in_0_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25] input [3:0] auto_routers_dest_nodes_in_0_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25] input [2:0] auto_routers_dest_nodes_in_0_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25] input [3:0] auto_routers_dest_nodes_in_0_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25] input [1:0] auto_routers_dest_nodes_in_0_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25] input [1:0] auto_routers_dest_nodes_in_0_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25] output [2:0] auto_routers_dest_nodes_in_0_credit_return, // @[LazyModuleImp.scala:107:25] output [2:0] auto_routers_dest_nodes_in_0_vc_free, // @[LazyModuleImp.scala:107:25] input auto_clock_in_clock, // @[LazyModuleImp.scala:107:25] input auto_clock_in_reset // @[LazyModuleImp.scala:107:25] ); Router_13 routers ( // @[NoC.scala:67:22] .clock (auto_clock_in_clock), .reset (auto_clock_in_reset), .auto_debug_out_va_stall_0 (auto_routers_debug_out_va_stall_0), .auto_debug_out_va_stall_1 (auto_routers_debug_out_va_stall_1), .auto_debug_out_va_stall_2 (auto_routers_debug_out_va_stall_2), .auto_debug_out_va_stall_3 (auto_routers_debug_out_va_stall_3), .auto_debug_out_va_stall_4 (auto_routers_debug_out_va_stall_4), .auto_debug_out_sa_stall_0 (auto_routers_debug_out_sa_stall_0), .auto_debug_out_sa_stall_1 (auto_routers_debug_out_sa_stall_1), .auto_debug_out_sa_stall_2 (auto_routers_debug_out_sa_stall_2), .auto_debug_out_sa_stall_3 (auto_routers_debug_out_sa_stall_3), .auto_debug_out_sa_stall_4 (auto_routers_debug_out_sa_stall_4), .auto_egress_nodes_out_flit_ready (auto_routers_egress_nodes_out_flit_ready), .auto_egress_nodes_out_flit_valid (auto_routers_egress_nodes_out_flit_valid), .auto_egress_nodes_out_flit_bits_head (auto_routers_egress_nodes_out_flit_bits_head), .auto_egress_nodes_out_flit_bits_tail 
(auto_routers_egress_nodes_out_flit_bits_tail), .auto_egress_nodes_out_flit_bits_payload (auto_routers_egress_nodes_out_flit_bits_payload), .auto_ingress_nodes_in_1_flit_ready (auto_routers_ingress_nodes_in_1_flit_ready), .auto_ingress_nodes_in_1_flit_valid (auto_routers_ingress_nodes_in_1_flit_valid), .auto_ingress_nodes_in_1_flit_bits_head (auto_routers_ingress_nodes_in_1_flit_bits_head), .auto_ingress_nodes_in_1_flit_bits_tail (auto_routers_ingress_nodes_in_1_flit_bits_tail), .auto_ingress_nodes_in_1_flit_bits_payload (auto_routers_ingress_nodes_in_1_flit_bits_payload), .auto_ingress_nodes_in_1_flit_bits_egress_id (auto_routers_ingress_nodes_in_1_flit_bits_egress_id), .auto_ingress_nodes_in_0_flit_ready (auto_routers_ingress_nodes_in_0_flit_ready), .auto_ingress_nodes_in_0_flit_valid (auto_routers_ingress_nodes_in_0_flit_valid), .auto_ingress_nodes_in_0_flit_bits_head (auto_routers_ingress_nodes_in_0_flit_bits_head), .auto_ingress_nodes_in_0_flit_bits_tail (auto_routers_ingress_nodes_in_0_flit_bits_tail), .auto_ingress_nodes_in_0_flit_bits_payload (auto_routers_ingress_nodes_in_0_flit_bits_payload), .auto_ingress_nodes_in_0_flit_bits_egress_id (auto_routers_ingress_nodes_in_0_flit_bits_egress_id), .auto_source_nodes_out_2_flit_0_valid (auto_routers_source_nodes_out_2_flit_0_valid), .auto_source_nodes_out_2_flit_0_bits_head (auto_routers_source_nodes_out_2_flit_0_bits_head), .auto_source_nodes_out_2_flit_0_bits_tail (auto_routers_source_nodes_out_2_flit_0_bits_tail), .auto_source_nodes_out_2_flit_0_bits_payload (auto_routers_source_nodes_out_2_flit_0_bits_payload), .auto_source_nodes_out_2_flit_0_bits_flow_vnet_id (auto_routers_source_nodes_out_2_flit_0_bits_flow_vnet_id), .auto_source_nodes_out_2_flit_0_bits_flow_ingress_node (auto_routers_source_nodes_out_2_flit_0_bits_flow_ingress_node), .auto_source_nodes_out_2_flit_0_bits_flow_ingress_node_id (auto_routers_source_nodes_out_2_flit_0_bits_flow_ingress_node_id), .auto_source_nodes_out_2_flit_0_bits_flow_egress_node (auto_routers_source_nodes_out_2_flit_0_bits_flow_egress_node), .auto_source_nodes_out_2_flit_0_bits_flow_egress_node_id (auto_routers_source_nodes_out_2_flit_0_bits_flow_egress_node_id), .auto_source_nodes_out_2_flit_0_bits_virt_channel_id (auto_routers_source_nodes_out_2_flit_0_bits_virt_channel_id), .auto_source_nodes_out_2_credit_return (auto_routers_source_nodes_out_2_credit_return), .auto_source_nodes_out_2_vc_free (auto_routers_source_nodes_out_2_vc_free), .auto_source_nodes_out_1_flit_0_valid (auto_routers_source_nodes_out_1_flit_0_valid), .auto_source_nodes_out_1_flit_0_bits_head (auto_routers_source_nodes_out_1_flit_0_bits_head), .auto_source_nodes_out_1_flit_0_bits_tail (auto_routers_source_nodes_out_1_flit_0_bits_tail), .auto_source_nodes_out_1_flit_0_bits_payload (auto_routers_source_nodes_out_1_flit_0_bits_payload), .auto_source_nodes_out_1_flit_0_bits_flow_vnet_id (auto_routers_source_nodes_out_1_flit_0_bits_flow_vnet_id), .auto_source_nodes_out_1_flit_0_bits_flow_ingress_node (auto_routers_source_nodes_out_1_flit_0_bits_flow_ingress_node), .auto_source_nodes_out_1_flit_0_bits_flow_ingress_node_id (auto_routers_source_nodes_out_1_flit_0_bits_flow_ingress_node_id), .auto_source_nodes_out_1_flit_0_bits_flow_egress_node (auto_routers_source_nodes_out_1_flit_0_bits_flow_egress_node), .auto_source_nodes_out_1_flit_0_bits_flow_egress_node_id (auto_routers_source_nodes_out_1_flit_0_bits_flow_egress_node_id), .auto_source_nodes_out_1_flit_0_bits_virt_channel_id 
(auto_routers_source_nodes_out_1_flit_0_bits_virt_channel_id), .auto_source_nodes_out_1_credit_return (auto_routers_source_nodes_out_1_credit_return), .auto_source_nodes_out_1_vc_free (auto_routers_source_nodes_out_1_vc_free), .auto_source_nodes_out_0_flit_0_valid (auto_routers_source_nodes_out_0_flit_0_valid), .auto_source_nodes_out_0_flit_0_bits_head (auto_routers_source_nodes_out_0_flit_0_bits_head), .auto_source_nodes_out_0_flit_0_bits_tail (auto_routers_source_nodes_out_0_flit_0_bits_tail), .auto_source_nodes_out_0_flit_0_bits_payload (auto_routers_source_nodes_out_0_flit_0_bits_payload), .auto_source_nodes_out_0_flit_0_bits_flow_vnet_id (auto_routers_source_nodes_out_0_flit_0_bits_flow_vnet_id), .auto_source_nodes_out_0_flit_0_bits_flow_ingress_node (auto_routers_source_nodes_out_0_flit_0_bits_flow_ingress_node), .auto_source_nodes_out_0_flit_0_bits_flow_ingress_node_id (auto_routers_source_nodes_out_0_flit_0_bits_flow_ingress_node_id), .auto_source_nodes_out_0_flit_0_bits_flow_egress_node (auto_routers_source_nodes_out_0_flit_0_bits_flow_egress_node), .auto_source_nodes_out_0_flit_0_bits_flow_egress_node_id (auto_routers_source_nodes_out_0_flit_0_bits_flow_egress_node_id), .auto_source_nodes_out_0_flit_0_bits_virt_channel_id (auto_routers_source_nodes_out_0_flit_0_bits_virt_channel_id), .auto_source_nodes_out_0_credit_return (auto_routers_source_nodes_out_0_credit_return), .auto_source_nodes_out_0_vc_free (auto_routers_source_nodes_out_0_vc_free), .auto_dest_nodes_in_2_flit_0_valid (auto_routers_dest_nodes_in_2_flit_0_valid), .auto_dest_nodes_in_2_flit_0_bits_head (auto_routers_dest_nodes_in_2_flit_0_bits_head), .auto_dest_nodes_in_2_flit_0_bits_tail (auto_routers_dest_nodes_in_2_flit_0_bits_tail), .auto_dest_nodes_in_2_flit_0_bits_payload (auto_routers_dest_nodes_in_2_flit_0_bits_payload), .auto_dest_nodes_in_2_flit_0_bits_flow_vnet_id (auto_routers_dest_nodes_in_2_flit_0_bits_flow_vnet_id), .auto_dest_nodes_in_2_flit_0_bits_flow_ingress_node (auto_routers_dest_nodes_in_2_flit_0_bits_flow_ingress_node), .auto_dest_nodes_in_2_flit_0_bits_flow_ingress_node_id (auto_routers_dest_nodes_in_2_flit_0_bits_flow_ingress_node_id), .auto_dest_nodes_in_2_flit_0_bits_flow_egress_node (auto_routers_dest_nodes_in_2_flit_0_bits_flow_egress_node), .auto_dest_nodes_in_2_flit_0_bits_flow_egress_node_id (auto_routers_dest_nodes_in_2_flit_0_bits_flow_egress_node_id), .auto_dest_nodes_in_2_flit_0_bits_virt_channel_id (auto_routers_dest_nodes_in_2_flit_0_bits_virt_channel_id), .auto_dest_nodes_in_2_credit_return (auto_routers_dest_nodes_in_2_credit_return), .auto_dest_nodes_in_2_vc_free (auto_routers_dest_nodes_in_2_vc_free), .auto_dest_nodes_in_1_flit_0_valid (auto_routers_dest_nodes_in_1_flit_0_valid), .auto_dest_nodes_in_1_flit_0_bits_head (auto_routers_dest_nodes_in_1_flit_0_bits_head), .auto_dest_nodes_in_1_flit_0_bits_tail (auto_routers_dest_nodes_in_1_flit_0_bits_tail), .auto_dest_nodes_in_1_flit_0_bits_payload (auto_routers_dest_nodes_in_1_flit_0_bits_payload), .auto_dest_nodes_in_1_flit_0_bits_flow_vnet_id (auto_routers_dest_nodes_in_1_flit_0_bits_flow_vnet_id), .auto_dest_nodes_in_1_flit_0_bits_flow_ingress_node (auto_routers_dest_nodes_in_1_flit_0_bits_flow_ingress_node), .auto_dest_nodes_in_1_flit_0_bits_flow_ingress_node_id (auto_routers_dest_nodes_in_1_flit_0_bits_flow_ingress_node_id), .auto_dest_nodes_in_1_flit_0_bits_flow_egress_node (auto_routers_dest_nodes_in_1_flit_0_bits_flow_egress_node), .auto_dest_nodes_in_1_flit_0_bits_flow_egress_node_id 
(auto_routers_dest_nodes_in_1_flit_0_bits_flow_egress_node_id), .auto_dest_nodes_in_1_flit_0_bits_virt_channel_id (auto_routers_dest_nodes_in_1_flit_0_bits_virt_channel_id), .auto_dest_nodes_in_1_credit_return (auto_routers_dest_nodes_in_1_credit_return), .auto_dest_nodes_in_1_vc_free (auto_routers_dest_nodes_in_1_vc_free), .auto_dest_nodes_in_0_flit_0_valid (auto_routers_dest_nodes_in_0_flit_0_valid), .auto_dest_nodes_in_0_flit_0_bits_head (auto_routers_dest_nodes_in_0_flit_0_bits_head), .auto_dest_nodes_in_0_flit_0_bits_tail (auto_routers_dest_nodes_in_0_flit_0_bits_tail), .auto_dest_nodes_in_0_flit_0_bits_payload (auto_routers_dest_nodes_in_0_flit_0_bits_payload), .auto_dest_nodes_in_0_flit_0_bits_flow_vnet_id (auto_routers_dest_nodes_in_0_flit_0_bits_flow_vnet_id), .auto_dest_nodes_in_0_flit_0_bits_flow_ingress_node (auto_routers_dest_nodes_in_0_flit_0_bits_flow_ingress_node), .auto_dest_nodes_in_0_flit_0_bits_flow_ingress_node_id (auto_routers_dest_nodes_in_0_flit_0_bits_flow_ingress_node_id), .auto_dest_nodes_in_0_flit_0_bits_flow_egress_node (auto_routers_dest_nodes_in_0_flit_0_bits_flow_egress_node), .auto_dest_nodes_in_0_flit_0_bits_flow_egress_node_id (auto_routers_dest_nodes_in_0_flit_0_bits_flow_egress_node_id), .auto_dest_nodes_in_0_flit_0_bits_virt_channel_id (auto_routers_dest_nodes_in_0_flit_0_bits_virt_channel_id), .auto_dest_nodes_in_0_credit_return (auto_routers_dest_nodes_in_0_credit_return), .auto_dest_nodes_in_0_vc_free (auto_routers_dest_nodes_in_0_vc_free) ); // @[NoC.scala:67:22] endmodule
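Editorial note on the generated wrapper above: the ClockSinkDomain module contains no logic of its own — it only drives the wrapped Router_13 with the incoming auto_clock_in_clock/auto_clock_in_reset pair, which is exactly the childClock/childReset forwarding described by Domain.Impl and LazyRawModuleImp. A hedged, minimal sketch of that wiring pattern (the module and its child are illustrative stand-ins, not the Rocket Chip or Constellation sources):

import chisel3._

// Trivial child used only to show that it elaborates under the forwarded clock/reset.
class ChildCounter extends Module {
  val io = IO(new Bundle { val count = Output(UInt(8.W)) })
  val c = RegInit(0.U(8.W))
  c := c + 1.U
  io.count := c
}

// Illustrative RawModule mirroring the ClockSinkDomain wiring seen above.
class ClockSinkDomainSketch extends RawModule {
  val auto_clock_in_clock = IO(Input(Clock()))
  val auto_clock_in_reset = IO(Input(Bool()))
  val count               = IO(Output(UInt(8.W)))

  // Mirrors LazyRawModuleImp: childClock/childReset are explicit wires driven
  // from the domain's sink clock bundle.
  val childClock = Wire(Clock())
  val childReset = Wire(Reset())
  childClock := auto_clock_in_clock
  childReset := auto_clock_in_reset

  // Children elaborated here pick up the forwarded clock/reset,
  // as with provideImplicitClockToLazyChildren = true in Domain.Impl.
  withClockAndReset(childClock, childReset) {
    val child = Module(new ChildCounter) // stand-in for the wrapped Router_13
    count := child.io.count
  }
}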
Generate the Verilog code corresponding to the following Chisel files. File tage.scala: package boom.v4.ifu import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.{Field, Parameters} import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tilelink._ import boom.v4.common._ import boom.v4.util.{BoomCoreStringPrefix, MaskLower, WrapInc} import scala.math.min class TageResp extends Bundle { val ctr = UInt(3.W) val u = UInt(2.W) } class TageTable(val nRows: Int, val tagSz: Int, val histLength: Int, val uBitPeriod: Int, val singlePorted: Boolean) (implicit p: Parameters) extends BoomModule()(p) with HasBoomFrontendParameters { require(histLength <= globalHistoryLength) val nWrBypassEntries = 2 val io = IO( new Bundle { val f1_req_valid = Input(Bool()) val f1_req_pc = Input(UInt(vaddrBitsExtended.W)) val f1_req_ghist = Input(UInt(globalHistoryLength.W)) val f2_resp = Output(Vec(bankWidth, Valid(new TageResp))) val update_mask = Input(Vec(bankWidth, Bool())) val update_taken = Input(Vec(bankWidth, Bool())) val update_alloc = Input(Vec(bankWidth, Bool())) val update_old_ctr = Input(Vec(bankWidth, UInt(3.W))) val update_pc = Input(UInt()) val update_hist = Input(UInt()) val update_u_mask = Input(Vec(bankWidth, Bool())) val update_u = Input(Vec(bankWidth, UInt(2.W))) }) def compute_folded_hist(hist: UInt, l: Int) = { val nChunks = (histLength + l - 1) / l val hist_chunks = (0 until nChunks) map {i => hist(min((i+1)*l, histLength)-1, i*l) } hist_chunks.reduce(_^_) } def compute_tag_and_hash(unhashed_idx: UInt, hist: UInt) = { val idx_history = compute_folded_hist(hist, log2Ceil(nRows)) val idx = (unhashed_idx ^ idx_history)(log2Ceil(nRows)-1,0) val tag_history = compute_folded_hist(hist, tagSz) val tag = ((unhashed_idx >> log2Ceil(nRows)) ^ tag_history)(tagSz-1,0) (idx, tag) } def inc_ctr(ctr: UInt, taken: Bool): UInt = { Mux(!taken, Mux(ctr === 0.U, 0.U, ctr - 1.U), Mux(ctr === 7.U, 7.U, ctr + 1.U)) } val doing_reset = RegInit(true.B) val reset_idx = RegInit(0.U(log2Ceil(nRows).W)) reset_idx := reset_idx + doing_reset when (reset_idx === (nRows-1).U) { doing_reset := false.B } class TageEntry extends Bundle { val valid = Bool() // TODO: Remove this valid bit val tag = UInt(tagSz.W) val ctr = UInt(3.W) } val tageEntrySz = 1 + tagSz + 3 val (s1_hashed_idx, s1_tag) = compute_tag_and_hash(fetchIdx(io.f1_req_pc), io.f1_req_ghist) val us = SyncReadMem(nRows, Vec(bankWidth*2, Bool())) val table = SyncReadMem(nRows, Vec(bankWidth, UInt(tageEntrySz.W))) us.suggestName(s"tage_u_${histLength}") table.suggestName(s"tage_table_${histLength}") val mems = Seq((f"tage_l$histLength", nRows, bankWidth * tageEntrySz)) val s2_tag = RegNext(s1_tag) val s2_req_rtage = Wire(Vec(bankWidth, new TageEntry)) val s2_req_rus = Wire(Vec(bankWidth*2, Bool())) val s2_req_rhits = VecInit(s2_req_rtage.map(e => e.valid && e.tag === s2_tag && !doing_reset)) for (w <- 0 until bankWidth) { // This bit indicates the TAGE table matched here io.f2_resp(w).valid := s2_req_rhits(w) io.f2_resp(w).bits.u := Cat(s2_req_rus(w*2+1), s2_req_rus(w*2)) io.f2_resp(w).bits.ctr := s2_req_rtage(w).ctr } val clear_u_ctr = RegInit(0.U((log2Ceil(uBitPeriod) + log2Ceil(nRows) + 1).W)) when (doing_reset) { clear_u_ctr := 1.U } .otherwise { clear_u_ctr := clear_u_ctr + 1.U } val doing_clear_u = clear_u_ctr(log2Ceil(uBitPeriod)-1,0) === 0.U val clear_u_hi = clear_u_ctr(log2Ceil(uBitPeriod) + log2Ceil(nRows)) === 1.U val clear_u_lo = clear_u_ctr(log2Ceil(uBitPeriod) + log2Ceil(nRows)) === 0.U val clear_u_idx = clear_u_ctr >> 
log2Ceil(uBitPeriod) val clear_u_mask = VecInit((0 until bankWidth*2) map { i => if (i % 2 == 0) clear_u_lo else clear_u_hi }).asUInt val (update_idx, update_tag) = compute_tag_and_hash(fetchIdx(io.update_pc), io.update_hist) val update_wdata = Wire(Vec(bankWidth, new TageEntry)) val wen = WireInit(doing_reset || io.update_mask.reduce(_||_)) val rdata = if (singlePorted) table.read(s1_hashed_idx, !wen && io.f1_req_valid) else table.read(s1_hashed_idx, io.f1_req_valid) when (RegNext(wen) && singlePorted.B) { s2_req_rtage := 0.U.asTypeOf(Vec(bankWidth, new TageEntry)) } .otherwise { s2_req_rtage := VecInit(rdata.map(_.asTypeOf(new TageEntry))) } when (wen) { val widx = Mux(doing_reset, reset_idx, update_idx) val wdata = Mux(doing_reset, VecInit(Seq.fill(bankWidth) { 0.U(tageEntrySz.W) }), VecInit(update_wdata.map(_.asUInt))) val wmask = Mux(doing_reset, ~(0.U(bankWidth.W)), io.update_mask.asUInt) table.write(widx, wdata, wmask.asBools) } val update_u_mask = VecInit((0 until bankWidth*2) map {i => io.update_u_mask(i / 2)}) val update_u_wen = WireInit(doing_reset || doing_clear_u || update_u_mask.reduce(_||_)) val u_rdata = if (singlePorted) { us.read(s1_hashed_idx, !update_u_wen && io.f1_req_valid) } else { us.read(s1_hashed_idx, io.f1_req_valid) } s2_req_rus := u_rdata when (update_u_wen) { val widx = Mux(doing_reset, reset_idx, Mux(doing_clear_u, clear_u_idx, update_idx)) val wdata = Mux(doing_reset || doing_clear_u, VecInit(0.U((bankWidth*2).W).asBools), VecInit(io.update_u.asUInt.asBools)) val wmask = Mux(doing_reset, ~(0.U((bankWidth*2).W)), Mux(doing_clear_u, clear_u_mask, update_u_mask.asUInt)) us.write(widx, wdata, wmask.asBools) } val wrbypass_tags = Reg(Vec(nWrBypassEntries, UInt(tagSz.W))) val wrbypass_idxs = Reg(Vec(nWrBypassEntries, UInt(log2Ceil(nRows).W))) val wrbypass = Reg(Vec(nWrBypassEntries, Vec(bankWidth, UInt(3.W)))) val wrbypass_enq_idx = RegInit(0.U(log2Ceil(nWrBypassEntries).W)) val wrbypass_hits = VecInit((0 until nWrBypassEntries) map { i => !doing_reset && wrbypass_tags(i) === update_tag && wrbypass_idxs(i) === update_idx }) val wrbypass_hit = wrbypass_hits.reduce(_||_) val wrbypass_hit_idx = PriorityEncoder(wrbypass_hits) for (w <- 0 until bankWidth) { update_wdata(w).ctr := Mux(io.update_alloc(w), Mux(io.update_taken(w), 4.U, 3.U ), Mux(wrbypass_hit, inc_ctr(wrbypass(wrbypass_hit_idx)(w), io.update_taken(w)), inc_ctr(io.update_old_ctr(w), io.update_taken(w)) ) ) update_wdata(w).valid := true.B update_wdata(w).tag := update_tag } when (io.update_mask.reduce(_||_)) { when (wrbypass_hits.reduce(_||_)) { wrbypass(wrbypass_hit_idx) := VecInit(update_wdata.map(_.ctr)) } .otherwise { wrbypass (wrbypass_enq_idx) := VecInit(update_wdata.map(_.ctr)) wrbypass_tags(wrbypass_enq_idx) := update_tag wrbypass_idxs(wrbypass_enq_idx) := update_idx wrbypass_enq_idx := WrapInc(wrbypass_enq_idx, nWrBypassEntries) } } } case class BoomTageParams( // nSets, histLen, tagSz tableInfo: Seq[Tuple3[Int, Int, Int]] = Seq(( 128, 2, 7), ( 128, 4, 7), ( 256, 8, 8), ( 256, 16, 8), ( 128, 32, 9), ( 128, 64, 9)), uBitPeriod: Int = 2048, singlePorted: Boolean = false ) class TageBranchPredictorBank(params: BoomTageParams = BoomTageParams())(implicit p: Parameters) extends BranchPredictorBank()(p) { val tageUBitPeriod = params.uBitPeriod val tageNTables = params.tableInfo.size class TageMeta extends Bundle { val provider = Vec(bankWidth, Valid(UInt(log2Ceil(tageNTables).W))) val alt_differs = Vec(bankWidth, Output(Bool())) val provider_u = Vec(bankWidth, Output(UInt(2.W))) val provider_ctr = 
Vec(bankWidth, Output(UInt(3.W))) val allocate = Vec(bankWidth, Valid(UInt(log2Ceil(tageNTables).W))) } val f3_meta = Wire(new TageMeta) override val metaSz = f3_meta.asUInt.getWidth require(metaSz <= bpdMaxMetaLength) def inc_u(u: UInt, alt_differs: Bool, mispredict: Bool): UInt = { Mux(!alt_differs, u, Mux(mispredict, Mux(u === 0.U, 0.U, u - 1.U), Mux(u === 3.U, 3.U, u + 1.U))) } val tt = params.tableInfo map { case (n, l, s) => { val t = Module(new TageTable(n, s, l, params.uBitPeriod, params.singlePorted)) t.io.f1_req_valid := RegNext(io.f0_valid) t.io.f1_req_pc := RegNext(bankAlign(io.f0_pc)) t.io.f1_req_ghist := io.f1_ghist (t, t.mems) } } val tables = tt.map(_._1) val mems = tt.map(_._2).flatten val f2_resps = VecInit(tables.map(_.io.f2_resp)) val f3_resps = RegNext(f2_resps) val s1_update_meta = s1_update.bits.meta.asTypeOf(new TageMeta) val s1_update_mispredict_mask = UIntToOH(s1_update.bits.cfi_idx.bits) & Fill(bankWidth, s1_update.bits.cfi_mispredicted) val s1_update_mask = WireInit((0.U).asTypeOf(Vec(tageNTables, Vec(bankWidth, Bool())))) val s1_update_u_mask = WireInit((0.U).asTypeOf(Vec(tageNTables, Vec(bankWidth, UInt(1.W))))) val s1_update_taken = Wire(Vec(tageNTables, Vec(bankWidth, Bool()))) val s1_update_old_ctr = Wire(Vec(tageNTables, Vec(bankWidth, UInt(3.W)))) val s1_update_alloc = Wire(Vec(tageNTables, Vec(bankWidth, Bool()))) val s1_update_u = Wire(Vec(tageNTables, Vec(bankWidth, UInt(2.W)))) s1_update_taken := DontCare s1_update_old_ctr := DontCare s1_update_alloc := DontCare s1_update_u := DontCare for (w <- 0 until bankWidth) { var s2_provided = false.B var s2_provider = 0.U var s2_alt_provided = false.B var s2_alt_provider = 0.U for (i <- 0 until tageNTables) { val hit = f2_resps(i)(w).valid s2_alt_provided = s2_alt_provided || (s2_provided && hit) s2_provided = s2_provided || hit s2_alt_provider = Mux(hit, s2_provider, s2_alt_provider) s2_provider = Mux(hit, i.U, s2_provider) } val s3_provided = RegNext(s2_provided) val s3_provider = RegNext(s2_provider) val s3_alt_provided = RegNext(s2_alt_provided) val s3_alt_provider = RegNext(s2_alt_provider) val prov = RegNext(f2_resps(s2_provider)(w).bits) val alt = RegNext(f2_resps(s2_alt_provider)(w).bits) io.resp.f3(w).taken := Mux(s3_provided, Mux(prov.ctr === 3.U || prov.ctr === 4.U, Mux(s3_alt_provided, alt.ctr(2), io.resp_in(0).f3(w).taken), prov.ctr(2)), io.resp_in(0).f3(w).taken ) f3_meta.provider(w).valid := s3_provided f3_meta.provider(w).bits := s3_provider f3_meta.alt_differs(w) := s3_alt_provided && alt.ctr(2) =/= io.resp.f3(w).taken f3_meta.provider_u(w) := prov.u f3_meta.provider_ctr(w) := prov.ctr // Create a mask of tables which did not hit our query, and also contain useless entries // and also uses a longer history than the provider val allocatable_slots = ( VecInit(f3_resps.map(r => !r(w).valid && r(w).bits.u === 0.U)).asUInt & ~(MaskLower(UIntToOH(f3_meta.provider(w).bits)) & Fill(tageNTables, f3_meta.provider(w).valid)) ) val alloc_lfsr = random.LFSR(tageNTables max 2) val first_entry = PriorityEncoder(allocatable_slots) val masked_entry = PriorityEncoder(allocatable_slots & alloc_lfsr) val alloc_entry = Mux(allocatable_slots(masked_entry), masked_entry, first_entry) f3_meta.allocate(w).valid := allocatable_slots =/= 0.U f3_meta.allocate(w).bits := alloc_entry val update_was_taken = (s1_update.bits.cfi_idx.valid && (s1_update.bits.cfi_idx.bits === w.U) && s1_update.bits.cfi_taken) when (s1_update.bits.br_mask(w) && s1_update.valid && s1_update.bits.is_commit_update) { when 
(s1_update_meta.provider(w).valid) { val provider = s1_update_meta.provider(w).bits s1_update_mask(provider)(w) := true.B s1_update_u_mask(provider)(w) := true.B val new_u = inc_u(s1_update_meta.provider_u(w), s1_update_meta.alt_differs(w), s1_update_mispredict_mask(w)) s1_update_u (provider)(w) := new_u s1_update_taken (provider)(w) := update_was_taken s1_update_old_ctr(provider)(w) := s1_update_meta.provider_ctr(w) s1_update_alloc (provider)(w) := false.B } } } when (s1_update.valid && s1_update.bits.is_commit_update && s1_update.bits.cfi_mispredicted && s1_update.bits.cfi_idx.valid) { val idx = s1_update.bits.cfi_idx.bits val allocate = s1_update_meta.allocate(idx) when (allocate.valid) { s1_update_mask (allocate.bits)(idx) := true.B s1_update_taken(allocate.bits)(idx) := s1_update.bits.cfi_taken s1_update_alloc(allocate.bits)(idx) := true.B s1_update_u_mask(allocate.bits)(idx) := true.B s1_update_u (allocate.bits)(idx) := 0.U } .otherwise { val provider = s1_update_meta.provider(idx) val decr_mask = Mux(provider.valid, ~MaskLower(UIntToOH(provider.bits)), 0.U) for (i <- 0 until tageNTables) { when (decr_mask(i)) { s1_update_u_mask(i)(idx) := true.B s1_update_u (i)(idx) := 0.U } } } } for (i <- 0 until tageNTables) { for (w <- 0 until bankWidth) { tables(i).io.update_mask(w) := RegNext(s1_update_mask(i)(w)) tables(i).io.update_taken(w) := RegNext(s1_update_taken(i)(w)) tables(i).io.update_alloc(w) := RegNext(s1_update_alloc(i)(w)) tables(i).io.update_old_ctr(w) := RegNext(s1_update_old_ctr(i)(w)) tables(i).io.update_u_mask(w) := RegNext(s1_update_u_mask(i)(w)) tables(i).io.update_u(w) := RegNext(s1_update_u(i)(w)) } tables(i).io.update_pc := RegNext(s1_update.bits.pc) tables(i).io.update_hist := RegNext(s1_update.bits.ghist) } //io.f3_meta := Cat(f3_meta.asUInt, micro.io.f3_meta(micro.metaSz-1,0), base.io.f3_meta(base.metaSz-1, 0)) io.f3_meta := f3_meta.asUInt } File predictor.scala: package boom.v4.ifu import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.{Field, Parameters} import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tilelink._ import boom.v4.common._ import boom.v4.util.{BoomCoreStringPrefix} // A branch prediction for a single instruction class BranchPrediction(implicit p: Parameters) extends BoomBundle()(p) { // If this is a branch, do we take it? val taken = Bool() // Is this a branch? val is_br = Bool() // Is this a JAL? val is_jal = Bool() // What is the target of his branch/jump? Do we know the target? 
val predicted_pc = Valid(UInt(vaddrBitsExtended.W)) } // A branch prediction for a entire fetch-width worth of instructions // This is typically merged from individual predictions from the banked // predictor class BranchPredictionBundle(implicit p: Parameters) extends BoomBundle()(p) with HasBoomFrontendParameters { val pc = UInt(vaddrBitsExtended.W) val preds = Vec(fetchWidth, new BranchPrediction) val meta = Output(Vec(nBanks, UInt(bpdMaxMetaLength.W))) val lhist = Output(Vec(nBanks, UInt(localHistoryLength.W))) } // A branch update for a fetch-width worth of instructions class BranchPredictionUpdate(implicit p: Parameters) extends BoomBundle()(p) with HasBoomFrontendParameters { // Indicates that this update is due to a speculated misprediction // Local predictors typically update themselves with speculative info // Global predictors only care about non-speculative updates val is_mispredict_update = Bool() val is_repair_update = Bool() val btb_mispredicts = UInt(fetchWidth.W) def is_btb_mispredict_update = btb_mispredicts =/= 0.U def is_commit_update = !(is_mispredict_update || is_repair_update || is_btb_mispredict_update) val pc = UInt(vaddrBitsExtended.W) // Mask of instructions which are branches. // If these are not cfi_idx, then they were predicted not taken val br_mask = UInt(fetchWidth.W) // Which CFI was taken/mispredicted (if any) val cfi_idx = Valid(UInt(log2Ceil(fetchWidth).W)) // Was the cfi taken? val cfi_taken = Bool() // Was the cfi mispredicted from the original prediction? val cfi_mispredicted = Bool() // Was the cfi a br? val cfi_is_br = Bool() // Was the cfi a jal/jalr? val cfi_is_jal = Bool() // Was the cfi a jalr val cfi_is_jalr = Bool() //val cfi_is_ret = Bool() val ghist = new GlobalHistory val lhist = Vec(nBanks, UInt(localHistoryLength.W)) // What did this CFI jump to? 
val target = UInt(vaddrBitsExtended.W) val meta = Vec(nBanks, UInt(bpdMaxMetaLength.W)) } // A branch update to a single bank class BranchPredictionBankUpdate(implicit p: Parameters) extends BoomBundle()(p) with HasBoomFrontendParameters { val is_mispredict_update = Bool() val is_repair_update = Bool() val btb_mispredicts = UInt(bankWidth.W) def is_btb_mispredict_update = btb_mispredicts =/= 0.U def is_commit_update = !(is_mispredict_update || is_repair_update || is_btb_mispredict_update) val pc = UInt(vaddrBitsExtended.W) val br_mask = UInt(bankWidth.W) val cfi_idx = Valid(UInt(log2Ceil(bankWidth).W)) val cfi_taken = Bool() val cfi_mispredicted = Bool() val cfi_is_br = Bool() val cfi_is_jal = Bool() val cfi_is_jalr = Bool() val ghist = UInt(globalHistoryLength.W) val lhist = UInt(localHistoryLength.W) val target = UInt(vaddrBitsExtended.W) val meta = UInt(bpdMaxMetaLength.W) } class BranchPredictionRequest(implicit p: Parameters) extends BoomBundle()(p) { val pc = UInt(vaddrBitsExtended.W) val ghist = new GlobalHistory } class BranchPredictionBankResponse(implicit p: Parameters) extends BoomBundle()(p) with HasBoomFrontendParameters { val f1 = Vec(bankWidth, new BranchPrediction) val f2 = Vec(bankWidth, new BranchPrediction) val f3 = Vec(bankWidth, new BranchPrediction) } abstract class BranchPredictorBank(implicit p: Parameters) extends BoomModule()(p) with HasBoomFrontendParameters { val metaSz = 0 def nInputs = 1 val mems: Seq[Tuple3[String, Int, Int]] val io = IO(new Bundle { val f0_valid = Input(Bool()) val f0_pc = Input(UInt(vaddrBitsExtended.W)) val f0_mask = Input(UInt(bankWidth.W)) // Local history not available until end of f1 val f1_ghist = Input(UInt(globalHistoryLength.W)) val f1_lhist = Input(UInt(localHistoryLength.W)) val resp_in = Input(Vec(nInputs, new BranchPredictionBankResponse)) val resp = Output(new BranchPredictionBankResponse) // Store the meta as a UInt, use width inference to figure out the shape val f3_meta = Output(UInt(bpdMaxMetaLength.W)) val f3_fire = Input(Bool()) val update = Input(Valid(new BranchPredictionBankUpdate)) }) io.resp := io.resp_in(0) io.f3_meta := 0.U val s0_idx = fetchIdx(io.f0_pc) val s1_idx = RegNext(s0_idx) val s2_idx = RegNext(s1_idx) val s3_idx = RegNext(s2_idx) val s0_valid = io.f0_valid val s1_valid = RegNext(s0_valid) val s2_valid = RegNext(s1_valid) val s3_valid = RegNext(s2_valid) val s0_mask = io.f0_mask val s1_mask = RegNext(s0_mask) val s2_mask = RegNext(s1_mask) val s3_mask = RegNext(s2_mask) val s0_pc = bankAlign(io.f0_pc) val s1_pc = RegNext(s0_pc) val s2_pc = RegNext(s1_pc) val s0_update = io.update val s0_update_idx = fetchIdx(io.update.bits.pc) val s0_update_valid = io.update.valid val s1_update = RegNext(s0_update) s1_update.bits.pc := bankAlign(s0_update.bits.pc) val s1_update_idx = RegNext(s0_update_idx) val s1_update_valid = RegNext(s0_update_valid) } class BranchPredictor(implicit p: Parameters) extends BoomModule()(p) with HasBoomFrontendParameters { val io = IO(new Bundle { // Requests and responses val f0_req = Input(Valid(new BranchPredictionRequest)) val resp = Output(new Bundle { val f1 = new BranchPredictionBundle val f2 = new BranchPredictionBundle val f3 = new BranchPredictionBundle }) val f3_fire = Input(Bool()) // Update val update = Input(Valid(new BranchPredictionUpdate)) }) var total_memsize = 0 val bpdStr = new StringBuilder bpdStr.append(BoomCoreStringPrefix("==Branch Predictor Memory Sizes==\n")) val banked_predictors = (0 until nBanks) map ( b => { val m = Module(if (useBPD) new 
ComposedBranchPredictorBank else new NullBranchPredictorBank) for ((n, d, w) <- m.mems) { bpdStr.append(BoomCoreStringPrefix(f"bank$b $n: $d x $w = ${d * w / 8}")) total_memsize = total_memsize + d * w / 8 } m }) bpdStr.append(BoomCoreStringPrefix(f"Total bpd size: ${total_memsize / 1024} KB\n")) override def toString: String = bpdStr.toString val banked_lhist_providers = Seq.fill(nBanks) { Module(if (localHistoryNSets > 0) new LocalBranchPredictorBank else new NullLocalBranchPredictorBank) } if (nBanks == 1) { banked_lhist_providers(0).io.f0_valid := io.f0_req.valid banked_lhist_providers(0).io.f0_pc := bankAlign(io.f0_req.bits.pc) banked_predictors(0).io.f0_valid := io.f0_req.valid banked_predictors(0).io.f0_pc := io.f0_req.bits.pc banked_predictors(0).io.f0_mask := fetchMask(io.f0_req.bits.pc) banked_predictors(0).io.f1_ghist := RegNext(io.f0_req.bits.ghist.histories(0)) banked_predictors(0).io.f1_lhist := banked_lhist_providers(0).io.f1_lhist banked_predictors(0).io.resp_in(0) := (0.U).asTypeOf(new BranchPredictionBankResponse) } else { require(nBanks == 2) banked_predictors(0).io.resp_in(0) := (0.U).asTypeOf(new BranchPredictionBankResponse) banked_predictors(1).io.resp_in(0) := (0.U).asTypeOf(new BranchPredictionBankResponse) banked_predictors(0).io.f1_lhist := banked_lhist_providers(0).io.f1_lhist banked_predictors(1).io.f1_lhist := banked_lhist_providers(1).io.f1_lhist when (bank(io.f0_req.bits.pc) === 0.U) { banked_lhist_providers(0).io.f0_valid := io.f0_req.valid banked_lhist_providers(0).io.f0_pc := bankAlign(io.f0_req.bits.pc) banked_lhist_providers(1).io.f0_valid := io.f0_req.valid banked_lhist_providers(1).io.f0_pc := nextBank(io.f0_req.bits.pc) banked_predictors(0).io.f0_valid := io.f0_req.valid banked_predictors(0).io.f0_pc := io.f0_req.bits.pc banked_predictors(0).io.f0_mask := fetchMask(io.f0_req.bits.pc) banked_predictors(1).io.f0_valid := io.f0_req.valid banked_predictors(1).io.f0_pc := nextBank(io.f0_req.bits.pc) banked_predictors(1).io.f0_mask := ~(0.U(bankWidth.W)) } .otherwise { banked_lhist_providers(0).io.f0_valid := io.f0_req.valid && !mayNotBeDualBanked(io.f0_req.bits.pc) banked_lhist_providers(0).io.f0_pc := nextBank(io.f0_req.bits.pc) banked_lhist_providers(1).io.f0_valid := io.f0_req.valid banked_lhist_providers(1).io.f0_pc := bankAlign(io.f0_req.bits.pc) banked_predictors(0).io.f0_valid := io.f0_req.valid && !mayNotBeDualBanked(io.f0_req.bits.pc) banked_predictors(0).io.f0_pc := nextBank(io.f0_req.bits.pc) banked_predictors(0).io.f0_mask := ~(0.U(bankWidth.W)) banked_predictors(1).io.f0_valid := io.f0_req.valid banked_predictors(1).io.f0_pc := io.f0_req.bits.pc banked_predictors(1).io.f0_mask := fetchMask(io.f0_req.bits.pc) } when (RegNext(bank(io.f0_req.bits.pc) === 0.U)) { banked_predictors(0).io.f1_ghist := RegNext(io.f0_req.bits.ghist.histories(0)) banked_predictors(1).io.f1_ghist := RegNext(io.f0_req.bits.ghist.histories(1)) } .otherwise { banked_predictors(0).io.f1_ghist := RegNext(io.f0_req.bits.ghist.histories(1)) banked_predictors(1).io.f1_ghist := RegNext(io.f0_req.bits.ghist.histories(0)) } } for (i <- 0 until nBanks) { banked_lhist_providers(i).io.f3_taken_br := banked_predictors(i).io.resp.f3.map ( p => p.is_br && p.predicted_pc.valid && p.taken ).reduce(_||_) } if (nBanks == 1) { io.resp.f1.preds := banked_predictors(0).io.resp.f1 io.resp.f2.preds := banked_predictors(0).io.resp.f2 io.resp.f3.preds := banked_predictors(0).io.resp.f3 io.resp.f3.meta(0) := banked_predictors(0).io.f3_meta io.resp.f3.lhist(0) := 
    banked_predictors(0).io.f3_fire      := io.f3_fire
    banked_lhist_providers(0).io.f3_fire := io.f3_fire
  } else {
    require(nBanks == 2)
    val b0_fire = io.f3_fire && RegNext(RegNext(RegNext(banked_predictors(0).io.f0_valid)))
    val b1_fire = io.f3_fire && RegNext(RegNext(RegNext(banked_predictors(1).io.f0_valid)))
    banked_predictors(0).io.f3_fire := b0_fire
    banked_predictors(1).io.f3_fire := b1_fire

    banked_lhist_providers(0).io.f3_fire := b0_fire
    banked_lhist_providers(1).io.f3_fire := b1_fire

    // The branch prediction metadata is stored un-shuffled
    io.resp.f3.meta(0) := banked_predictors(0).io.f3_meta
    io.resp.f3.meta(1) := banked_predictors(1).io.f3_meta

    io.resp.f3.lhist(0) := banked_lhist_providers(0).io.f3_lhist
    io.resp.f3.lhist(1) := banked_lhist_providers(1).io.f3_lhist

    when (bank(io.resp.f1.pc) === 0.U) {
      for (i <- 0 until bankWidth) {
        io.resp.f1.preds(i)           := banked_predictors(0).io.resp.f1(i)
        io.resp.f1.preds(i+bankWidth) := banked_predictors(1).io.resp.f1(i)
      }
    } .otherwise {
      for (i <- 0 until bankWidth) {
        io.resp.f1.preds(i)           := banked_predictors(1).io.resp.f1(i)
        io.resp.f1.preds(i+bankWidth) := banked_predictors(0).io.resp.f1(i)
      }
    }

    when (bank(io.resp.f2.pc) === 0.U) {
      for (i <- 0 until bankWidth) {
        io.resp.f2.preds(i)           := banked_predictors(0).io.resp.f2(i)
        io.resp.f2.preds(i+bankWidth) := banked_predictors(1).io.resp.f2(i)
      }
    } .otherwise {
      for (i <- 0 until bankWidth) {
        io.resp.f2.preds(i)           := banked_predictors(1).io.resp.f2(i)
        io.resp.f2.preds(i+bankWidth) := banked_predictors(0).io.resp.f2(i)
      }
    }

    when (bank(io.resp.f3.pc) === 0.U) {
      for (i <- 0 until bankWidth) {
        io.resp.f3.preds(i)           := banked_predictors(0).io.resp.f3(i)
        io.resp.f3.preds(i+bankWidth) := banked_predictors(1).io.resp.f3(i)
      }
    } .otherwise {
      for (i <- 0 until bankWidth) {
        io.resp.f3.preds(i)           := banked_predictors(1).io.resp.f3(i)
        io.resp.f3.preds(i+bankWidth) := banked_predictors(0).io.resp.f3(i)
      }
    }
  }

  io.resp.f1.pc := RegNext(io.f0_req.bits.pc)
  io.resp.f2.pc := RegNext(io.resp.f1.pc)
  io.resp.f3.pc := RegNext(io.resp.f2.pc)

  // We don't care about meta from the f1 and f2 resps
  // Use the meta from the latest resp
  io.resp.f1.meta  := DontCare
  io.resp.f2.meta  := DontCare
  io.resp.f1.lhist := DontCare
  io.resp.f2.lhist := DontCare

  for (i <- 0 until nBanks) {
    banked_predictors(i).io.update.bits.is_mispredict_update := io.update.bits.is_mispredict_update
    banked_predictors(i).io.update.bits.is_repair_update     := io.update.bits.is_repair_update

    banked_predictors(i).io.update.bits.meta             := io.update.bits.meta(i)
    banked_predictors(i).io.update.bits.lhist            := io.update.bits.lhist(i)
    banked_predictors(i).io.update.bits.cfi_idx.bits     := io.update.bits.cfi_idx.bits
    banked_predictors(i).io.update.bits.cfi_taken        := io.update.bits.cfi_taken
    banked_predictors(i).io.update.bits.cfi_mispredicted := io.update.bits.cfi_mispredicted
    banked_predictors(i).io.update.bits.cfi_is_br        := io.update.bits.cfi_is_br
    banked_predictors(i).io.update.bits.cfi_is_jal       := io.update.bits.cfi_is_jal
    banked_predictors(i).io.update.bits.cfi_is_jalr      := io.update.bits.cfi_is_jalr
    banked_predictors(i).io.update.bits.target           := io.update.bits.target

    banked_lhist_providers(i).io.update.mispredict := io.update.bits.is_mispredict_update
    banked_lhist_providers(i).io.update.repair     := io.update.bits.is_repair_update
    banked_lhist_providers(i).io.update.lhist      := io.update.bits.lhist(i)
  }

  if (nBanks == 1) {
    banked_predictors(0).io.update.valid        := io.update.valid
    banked_predictors(0).io.update.bits.pc      := io.update.bits.pc
    banked_predictors(0).io.update.bits.br_mask := io.update.bits.br_mask
    banked_predictors(0).io.update.bits.btb_mispredicts := io.update.bits.btb_mispredicts
    banked_predictors(0).io.update.bits.cfi_idx.valid   := io.update.bits.cfi_idx.valid
    banked_predictors(0).io.update.bits.ghist           := io.update.bits.ghist.histories(0)

    banked_lhist_providers(0).io.update.valid := io.update.valid && io.update.bits.br_mask =/= 0.U
    banked_lhist_providers(0).io.update.pc    := bankAlign(io.update.bits.pc)
  } else {
    require(nBanks == 2)
    // Split the single update bundle for the fetchpacket into two updates
    // 1 for each bank.

    when (bank(io.update.bits.pc) === 0.U) {
      val b1_update_valid = io.update.valid &&
        (!io.update.bits.cfi_idx.valid || io.update.bits.cfi_idx.bits >= bankWidth.U)

      banked_lhist_providers(0).io.update.valid := io.update.valid && io.update.bits.br_mask(bankWidth-1,0) =/= 0.U
      banked_lhist_providers(1).io.update.valid := b1_update_valid && io.update.bits.br_mask(fetchWidth-1,bankWidth) =/= 0.U

      banked_lhist_providers(0).io.update.pc := bankAlign(io.update.bits.pc)
      banked_lhist_providers(1).io.update.pc := nextBank(io.update.bits.pc)

      banked_predictors(0).io.update.valid := io.update.valid
      banked_predictors(1).io.update.valid := b1_update_valid

      banked_predictors(0).io.update.bits.pc := io.update.bits.pc
      banked_predictors(1).io.update.bits.pc := nextBank(io.update.bits.pc)

      banked_predictors(0).io.update.bits.br_mask := io.update.bits.br_mask
      banked_predictors(1).io.update.bits.br_mask := io.update.bits.br_mask >> bankWidth

      banked_predictors(0).io.update.bits.btb_mispredicts := io.update.bits.btb_mispredicts
      banked_predictors(1).io.update.bits.btb_mispredicts := io.update.bits.btb_mispredicts >> bankWidth

      banked_predictors(0).io.update.bits.cfi_idx.valid := io.update.bits.cfi_idx.valid && io.update.bits.cfi_idx.bits < bankWidth.U
      banked_predictors(1).io.update.bits.cfi_idx.valid := io.update.bits.cfi_idx.valid && io.update.bits.cfi_idx.bits >= bankWidth.U

      banked_predictors(0).io.update.bits.ghist := io.update.bits.ghist.histories(0)
      banked_predictors(1).io.update.bits.ghist := io.update.bits.ghist.histories(1)
    } .otherwise {
      val b0_update_valid = io.update.valid && !mayNotBeDualBanked(io.update.bits.pc) &&
        (!io.update.bits.cfi_idx.valid || io.update.bits.cfi_idx.bits >= bankWidth.U)

      banked_lhist_providers(1).io.update.valid := io.update.valid && io.update.bits.br_mask(bankWidth-1,0) =/= 0.U
      banked_lhist_providers(0).io.update.valid := b0_update_valid && io.update.bits.br_mask(fetchWidth-1,bankWidth) =/= 0.U

      banked_lhist_providers(1).io.update.pc := bankAlign(io.update.bits.pc)
      banked_lhist_providers(0).io.update.pc := nextBank(io.update.bits.pc)

      banked_predictors(1).io.update.valid := io.update.valid
      banked_predictors(0).io.update.valid := b0_update_valid

      banked_predictors(1).io.update.bits.pc := io.update.bits.pc
      banked_predictors(0).io.update.bits.pc := nextBank(io.update.bits.pc)

      banked_predictors(1).io.update.bits.br_mask := io.update.bits.br_mask
      banked_predictors(0).io.update.bits.br_mask := io.update.bits.br_mask >> bankWidth

      banked_predictors(1).io.update.bits.btb_mispredicts := io.update.bits.btb_mispredicts
      banked_predictors(0).io.update.bits.btb_mispredicts := io.update.bits.btb_mispredicts >> bankWidth

      banked_predictors(1).io.update.bits.cfi_idx.valid := io.update.bits.cfi_idx.valid && io.update.bits.cfi_idx.bits < bankWidth.U
      banked_predictors(0).io.update.bits.cfi_idx.valid := io.update.bits.cfi_idx.valid && io.update.bits.cfi_idx.bits >= bankWidth.U

      banked_predictors(1).io.update.bits.ghist := io.update.bits.ghist.histories(0)
      banked_predictors(0).io.update.bits.ghist := io.update.bits.ghist.histories(1)
    }
  }

  when (io.update.valid) {
    when (io.update.bits.cfi_is_br && io.update.bits.cfi_idx.valid) {
      assert(io.update.bits.br_mask(io.update.bits.cfi_idx.bits))
    }
  }
}

class NullBranchPredictorBank(implicit p: Parameters) extends BranchPredictorBank()(p)
{
  val mems = Nil
}
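// ---------------------------------------------------------------------------
// Illustrative sketch (not part of BOOM): the meta convention used above. Each
// bank flattens its private prediction state into the flat UInt io.f3_meta
// port, the frontend carries those bits alongside the fetch packet, and the
// same bits come back on update.bits.meta, where asTypeOf recovers the fields;
// this is how the 120-bit io_f3_meta / io_update_bits_meta ports of the
// generated TageBranchPredictorBank below arise. ExampleMeta, its fields, and
// MetaPackExample are assumptions for illustration only.
class ExampleMeta extends Bundle {
  val provider_hit = Bool()
  val provider_ctr = UInt(3.W)
}

class MetaPackExample extends Module {
  val io = IO(new Bundle {
    val packed   = Output(UInt(4.W))
    val unpacked = Output(new ExampleMeta)
  })
  val m = Wire(new ExampleMeta)
  m.provider_hit := true.B
  m.provider_ctr := 5.U
  io.packed   := m.asUInt                             // Bundle -> flat UInt
  io.unpacked := io.packed.asTypeOf(new ExampleMeta)  // flat UInt -> Bundle
}
// ---------------------------------------------------------------------------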
module TageBranchPredictorBank( // @[tage.scala:209:7]
  input clock, // @[tage.scala:209:7]
  input reset, // @[tage.scala:209:7]
  input io_f0_valid, // @[predictor.scala:140:14]
  input [39:0] io_f0_pc, // @[predictor.scala:140:14]
  input [3:0] io_f0_mask, // @[predictor.scala:140:14]
  input [63:0] io_f1_ghist, // @[predictor.scala:140:14]
  input io_resp_in_0_f1_0_taken, // @[predictor.scala:140:14]
  input io_resp_in_0_f1_0_is_br, // @[predictor.scala:140:14]
  input io_resp_in_0_f1_0_is_jal, // @[predictor.scala:140:14]
  input io_resp_in_0_f1_0_predicted_pc_valid, // @[predictor.scala:140:14]
  input [39:0] io_resp_in_0_f1_0_predicted_pc_bits, // @[predictor.scala:140:14]
  input io_resp_in_0_f1_1_taken, // @[predictor.scala:140:14]
  input io_resp_in_0_f1_1_is_br, // @[predictor.scala:140:14]
  input io_resp_in_0_f1_1_is_jal, // @[predictor.scala:140:14]
  input io_resp_in_0_f1_1_predicted_pc_valid, // @[predictor.scala:140:14]
  input [39:0] io_resp_in_0_f1_1_predicted_pc_bits, // @[predictor.scala:140:14]
  input io_resp_in_0_f1_2_taken, // @[predictor.scala:140:14]
  input io_resp_in_0_f1_2_is_br, // @[predictor.scala:140:14]
  input io_resp_in_0_f1_2_is_jal, // @[predictor.scala:140:14]
  input io_resp_in_0_f1_2_predicted_pc_valid, // @[predictor.scala:140:14]
  input [39:0] io_resp_in_0_f1_2_predicted_pc_bits, // @[predictor.scala:140:14]
  input io_resp_in_0_f1_3_taken, // @[predictor.scala:140:14]
  input io_resp_in_0_f1_3_is_br, // @[predictor.scala:140:14]
  input io_resp_in_0_f1_3_is_jal, // @[predictor.scala:140:14]
  input io_resp_in_0_f1_3_predicted_pc_valid, // @[predictor.scala:140:14]
  input [39:0] io_resp_in_0_f1_3_predicted_pc_bits, // @[predictor.scala:140:14]
  input io_resp_in_0_f2_0_taken, // @[predictor.scala:140:14]
  input io_resp_in_0_f2_0_is_br, // @[predictor.scala:140:14]
  input io_resp_in_0_f2_0_is_jal, // @[predictor.scala:140:14]
  input io_resp_in_0_f2_0_predicted_pc_valid, // @[predictor.scala:140:14]
  input [39:0] io_resp_in_0_f2_0_predicted_pc_bits, // @[predictor.scala:140:14]
  input io_resp_in_0_f2_1_taken, // @[predictor.scala:140:14]
  input io_resp_in_0_f2_1_is_br, // @[predictor.scala:140:14]
  input io_resp_in_0_f2_1_is_jal, // @[predictor.scala:140:14]
  input io_resp_in_0_f2_1_predicted_pc_valid, // @[predictor.scala:140:14]
  input [39:0] io_resp_in_0_f2_1_predicted_pc_bits, // @[predictor.scala:140:14]
  input io_resp_in_0_f2_2_taken, // @[predictor.scala:140:14]
  input io_resp_in_0_f2_2_is_br, // @[predictor.scala:140:14]
  input io_resp_in_0_f2_2_is_jal, // @[predictor.scala:140:14]
  input io_resp_in_0_f2_2_predicted_pc_valid, // @[predictor.scala:140:14]
  input [39:0] io_resp_in_0_f2_2_predicted_pc_bits, // @[predictor.scala:140:14]
  input io_resp_in_0_f2_3_taken, // @[predictor.scala:140:14]
  input io_resp_in_0_f2_3_is_br, // @[predictor.scala:140:14]
  input io_resp_in_0_f2_3_is_jal, // @[predictor.scala:140:14]
  input io_resp_in_0_f2_3_predicted_pc_valid, // @[predictor.scala:140:14]
  input [39:0] io_resp_in_0_f2_3_predicted_pc_bits, // @[predictor.scala:140:14]
  input io_resp_in_0_f3_0_taken, // @[predictor.scala:140:14]
  input io_resp_in_0_f3_0_is_br, // @[predictor.scala:140:14]
  input io_resp_in_0_f3_0_is_jal, // @[predictor.scala:140:14]
  input io_resp_in_0_f3_0_predicted_pc_valid, // @[predictor.scala:140:14]
  input [39:0] io_resp_in_0_f3_0_predicted_pc_bits, // @[predictor.scala:140:14]
  input io_resp_in_0_f3_1_taken, // @[predictor.scala:140:14]
  input io_resp_in_0_f3_1_is_br, // @[predictor.scala:140:14]
  input io_resp_in_0_f3_1_is_jal, // @[predictor.scala:140:14]
  input io_resp_in_0_f3_1_predicted_pc_valid, // @[predictor.scala:140:14]
  input [39:0] io_resp_in_0_f3_1_predicted_pc_bits, // @[predictor.scala:140:14]
  input io_resp_in_0_f3_2_taken, // @[predictor.scala:140:14]
  input io_resp_in_0_f3_2_is_br, // @[predictor.scala:140:14]
  input io_resp_in_0_f3_2_is_jal, // @[predictor.scala:140:14]
  input io_resp_in_0_f3_2_predicted_pc_valid, // @[predictor.scala:140:14]
  input [39:0] io_resp_in_0_f3_2_predicted_pc_bits, // @[predictor.scala:140:14]
  input io_resp_in_0_f3_3_taken, // @[predictor.scala:140:14]
  input io_resp_in_0_f3_3_is_br, // @[predictor.scala:140:14]
  input io_resp_in_0_f3_3_is_jal, // @[predictor.scala:140:14]
  input io_resp_in_0_f3_3_predicted_pc_valid, // @[predictor.scala:140:14]
  input [39:0] io_resp_in_0_f3_3_predicted_pc_bits, // @[predictor.scala:140:14]
  output io_resp_f1_0_taken, // @[predictor.scala:140:14]
  output io_resp_f1_0_is_br, // @[predictor.scala:140:14]
  output io_resp_f1_0_is_jal, // @[predictor.scala:140:14]
  output io_resp_f1_0_predicted_pc_valid, // @[predictor.scala:140:14]
  output [39:0] io_resp_f1_0_predicted_pc_bits, // @[predictor.scala:140:14]
  output io_resp_f1_1_taken, // @[predictor.scala:140:14]
  output io_resp_f1_1_is_br, // @[predictor.scala:140:14]
  output io_resp_f1_1_is_jal, // @[predictor.scala:140:14]
  output io_resp_f1_1_predicted_pc_valid, // @[predictor.scala:140:14]
  output [39:0] io_resp_f1_1_predicted_pc_bits, // @[predictor.scala:140:14]
  output io_resp_f1_2_taken, // @[predictor.scala:140:14]
  output io_resp_f1_2_is_br, // @[predictor.scala:140:14]
  output io_resp_f1_2_is_jal, // @[predictor.scala:140:14]
  output io_resp_f1_2_predicted_pc_valid, // @[predictor.scala:140:14]
  output [39:0] io_resp_f1_2_predicted_pc_bits, // @[predictor.scala:140:14]
  output io_resp_f1_3_taken, // @[predictor.scala:140:14]
  output io_resp_f1_3_is_br, // @[predictor.scala:140:14]
  output io_resp_f1_3_is_jal, // @[predictor.scala:140:14]
  output io_resp_f1_3_predicted_pc_valid, // @[predictor.scala:140:14]
  output [39:0] io_resp_f1_3_predicted_pc_bits, // @[predictor.scala:140:14]
  output io_resp_f2_0_taken, // @[predictor.scala:140:14]
  output io_resp_f2_0_is_br, // @[predictor.scala:140:14]
  output io_resp_f2_0_is_jal, // @[predictor.scala:140:14]
  output io_resp_f2_0_predicted_pc_valid, // @[predictor.scala:140:14]
  output [39:0] io_resp_f2_0_predicted_pc_bits, // @[predictor.scala:140:14]
  output io_resp_f2_1_taken, // @[predictor.scala:140:14]
  output io_resp_f2_1_is_br, // @[predictor.scala:140:14]
  output io_resp_f2_1_is_jal, // @[predictor.scala:140:14]
  output io_resp_f2_1_predicted_pc_valid, // @[predictor.scala:140:14]
  output [39:0] io_resp_f2_1_predicted_pc_bits, // @[predictor.scala:140:14]
  output io_resp_f2_2_taken, // @[predictor.scala:140:14]
  output io_resp_f2_2_is_br, // @[predictor.scala:140:14]
  output io_resp_f2_2_is_jal, // @[predictor.scala:140:14]
  output io_resp_f2_2_predicted_pc_valid, // @[predictor.scala:140:14]
  output [39:0] io_resp_f2_2_predicted_pc_bits, // @[predictor.scala:140:14]
  output io_resp_f2_3_taken, // @[predictor.scala:140:14]
  output io_resp_f2_3_is_br, // @[predictor.scala:140:14]
  output io_resp_f2_3_is_jal, // @[predictor.scala:140:14]
  output io_resp_f2_3_predicted_pc_valid, // @[predictor.scala:140:14]
  output [39:0] io_resp_f2_3_predicted_pc_bits, // @[predictor.scala:140:14]
  output io_resp_f3_0_taken, // @[predictor.scala:140:14]
  output io_resp_f3_0_is_br, // @[predictor.scala:140:14]
  output io_resp_f3_0_is_jal, // @[predictor.scala:140:14]
  output io_resp_f3_0_predicted_pc_valid, // @[predictor.scala:140:14]
  output [39:0] io_resp_f3_0_predicted_pc_bits, // @[predictor.scala:140:14]
  output io_resp_f3_1_taken, // @[predictor.scala:140:14]
  output io_resp_f3_1_is_br, // @[predictor.scala:140:14]
  output io_resp_f3_1_is_jal, // @[predictor.scala:140:14]
  output io_resp_f3_1_predicted_pc_valid, // @[predictor.scala:140:14]
  output [39:0] io_resp_f3_1_predicted_pc_bits, // @[predictor.scala:140:14]
  output io_resp_f3_2_taken, // @[predictor.scala:140:14]
  output io_resp_f3_2_is_br, // @[predictor.scala:140:14]
  output io_resp_f3_2_is_jal, // @[predictor.scala:140:14]
  output io_resp_f3_2_predicted_pc_valid, // @[predictor.scala:140:14]
  output [39:0] io_resp_f3_2_predicted_pc_bits, // @[predictor.scala:140:14]
  output io_resp_f3_3_taken, // @[predictor.scala:140:14]
  output io_resp_f3_3_is_br, // @[predictor.scala:140:14]
  output io_resp_f3_3_is_jal, // @[predictor.scala:140:14]
  output io_resp_f3_3_predicted_pc_valid, // @[predictor.scala:140:14]
  output [39:0] io_resp_f3_3_predicted_pc_bits, // @[predictor.scala:140:14]
  output [119:0] io_f3_meta, // @[predictor.scala:140:14]
  input io_f3_fire, // @[predictor.scala:140:14]
  input io_update_valid, // @[predictor.scala:140:14]
  input io_update_bits_is_mispredict_update, // @[predictor.scala:140:14]
  input io_update_bits_is_repair_update, // @[predictor.scala:140:14]
  input [3:0] io_update_bits_btb_mispredicts, // @[predictor.scala:140:14]
  input [39:0] io_update_bits_pc, // @[predictor.scala:140:14]
  input [3:0] io_update_bits_br_mask, // @[predictor.scala:140:14]
  input io_update_bits_cfi_idx_valid, // @[predictor.scala:140:14]
  input [1:0] io_update_bits_cfi_idx_bits, // @[predictor.scala:140:14]
  input io_update_bits_cfi_taken, // @[predictor.scala:140:14]
  input io_update_bits_cfi_mispredicted, // @[predictor.scala:140:14]
  input io_update_bits_cfi_is_br, // @[predictor.scala:140:14]
  input io_update_bits_cfi_is_jal, // @[predictor.scala:140:14]
  input io_update_bits_cfi_is_jalr, // @[predictor.scala:140:14]
  input [63:0] io_update_bits_ghist, // @[predictor.scala:140:14]
  input io_update_bits_lhist, // @[predictor.scala:140:14]
  input [39:0] io_update_bits_target, // @[predictor.scala:140:14]
  input [119:0] io_update_bits_meta // @[predictor.scala:140:14]
);
  wire [2:0] s1_update_meta_provider_ctr_3; // @[tage.scala:248:52]
  wire [2:0] s1_update_meta_provider_ctr_2; // @[tage.scala:248:52]
  wire [2:0] s1_update_meta_provider_ctr_1; // @[tage.scala:248:52]
  wire [2:0] s1_update_meta_provider_ctr_0; // @[tage.scala:248:52]
  wire _alloc_lfsr_prng_3_io_out_0; // @[PRNG.scala:91:22]
  wire _alloc_lfsr_prng_3_io_out_1; // @[PRNG.scala:91:22]
  wire _alloc_lfsr_prng_3_io_out_2; // @[PRNG.scala:91:22]
  wire _alloc_lfsr_prng_3_io_out_3; // @[PRNG.scala:91:22]
  wire _alloc_lfsr_prng_3_io_out_4; // @[PRNG.scala:91:22]
  wire _alloc_lfsr_prng_3_io_out_5; // @[PRNG.scala:91:22]
  wire _alloc_lfsr_prng_2_io_out_0; // @[PRNG.scala:91:22]
  wire _alloc_lfsr_prng_2_io_out_1; // @[PRNG.scala:91:22]
  wire _alloc_lfsr_prng_2_io_out_2; // @[PRNG.scala:91:22]
  wire _alloc_lfsr_prng_2_io_out_3; // @[PRNG.scala:91:22]
  wire _alloc_lfsr_prng_2_io_out_4; // @[PRNG.scala:91:22]
  wire _alloc_lfsr_prng_2_io_out_5; // @[PRNG.scala:91:22]
  wire _alloc_lfsr_prng_1_io_out_0; // @[PRNG.scala:91:22]
  wire _alloc_lfsr_prng_1_io_out_1; // @[PRNG.scala:91:22]
  wire _alloc_lfsr_prng_1_io_out_2; // @[PRNG.scala:91:22]
  wire _alloc_lfsr_prng_1_io_out_3; // @[PRNG.scala:91:22]
  wire _alloc_lfsr_prng_1_io_out_4; // @[PRNG.scala:91:22]
  wire _alloc_lfsr_prng_1_io_out_5; // @[PRNG.scala:91:22]
  wire _alloc_lfsr_prng_io_out_0; // @[PRNG.scala:91:22]
wire _alloc_lfsr_prng_io_out_1; // @[PRNG.scala:91:22] wire _alloc_lfsr_prng_io_out_2; // @[PRNG.scala:91:22] wire _alloc_lfsr_prng_io_out_3; // @[PRNG.scala:91:22] wire _alloc_lfsr_prng_io_out_4; // @[PRNG.scala:91:22] wire _alloc_lfsr_prng_io_out_5; // @[PRNG.scala:91:22] wire io_f0_valid_0 = io_f0_valid; // @[tage.scala:209:7] wire [39:0] io_f0_pc_0 = io_f0_pc; // @[tage.scala:209:7] wire [3:0] io_f0_mask_0 = io_f0_mask; // @[tage.scala:209:7] wire [63:0] io_f1_ghist_0 = io_f1_ghist; // @[tage.scala:209:7] wire io_resp_in_0_f1_0_taken_0 = io_resp_in_0_f1_0_taken; // @[tage.scala:209:7] wire io_resp_in_0_f1_0_is_br_0 = io_resp_in_0_f1_0_is_br; // @[tage.scala:209:7] wire io_resp_in_0_f1_0_is_jal_0 = io_resp_in_0_f1_0_is_jal; // @[tage.scala:209:7] wire io_resp_in_0_f1_0_predicted_pc_valid_0 = io_resp_in_0_f1_0_predicted_pc_valid; // @[tage.scala:209:7] wire [39:0] io_resp_in_0_f1_0_predicted_pc_bits_0 = io_resp_in_0_f1_0_predicted_pc_bits; // @[tage.scala:209:7] wire io_resp_in_0_f1_1_taken_0 = io_resp_in_0_f1_1_taken; // @[tage.scala:209:7] wire io_resp_in_0_f1_1_is_br_0 = io_resp_in_0_f1_1_is_br; // @[tage.scala:209:7] wire io_resp_in_0_f1_1_is_jal_0 = io_resp_in_0_f1_1_is_jal; // @[tage.scala:209:7] wire io_resp_in_0_f1_1_predicted_pc_valid_0 = io_resp_in_0_f1_1_predicted_pc_valid; // @[tage.scala:209:7] wire [39:0] io_resp_in_0_f1_1_predicted_pc_bits_0 = io_resp_in_0_f1_1_predicted_pc_bits; // @[tage.scala:209:7] wire io_resp_in_0_f1_2_taken_0 = io_resp_in_0_f1_2_taken; // @[tage.scala:209:7] wire io_resp_in_0_f1_2_is_br_0 = io_resp_in_0_f1_2_is_br; // @[tage.scala:209:7] wire io_resp_in_0_f1_2_is_jal_0 = io_resp_in_0_f1_2_is_jal; // @[tage.scala:209:7] wire io_resp_in_0_f1_2_predicted_pc_valid_0 = io_resp_in_0_f1_2_predicted_pc_valid; // @[tage.scala:209:7] wire [39:0] io_resp_in_0_f1_2_predicted_pc_bits_0 = io_resp_in_0_f1_2_predicted_pc_bits; // @[tage.scala:209:7] wire io_resp_in_0_f1_3_taken_0 = io_resp_in_0_f1_3_taken; // @[tage.scala:209:7] wire io_resp_in_0_f1_3_is_br_0 = io_resp_in_0_f1_3_is_br; // @[tage.scala:209:7] wire io_resp_in_0_f1_3_is_jal_0 = io_resp_in_0_f1_3_is_jal; // @[tage.scala:209:7] wire io_resp_in_0_f1_3_predicted_pc_valid_0 = io_resp_in_0_f1_3_predicted_pc_valid; // @[tage.scala:209:7] wire [39:0] io_resp_in_0_f1_3_predicted_pc_bits_0 = io_resp_in_0_f1_3_predicted_pc_bits; // @[tage.scala:209:7] wire io_resp_in_0_f2_0_taken_0 = io_resp_in_0_f2_0_taken; // @[tage.scala:209:7] wire io_resp_in_0_f2_0_is_br_0 = io_resp_in_0_f2_0_is_br; // @[tage.scala:209:7] wire io_resp_in_0_f2_0_is_jal_0 = io_resp_in_0_f2_0_is_jal; // @[tage.scala:209:7] wire io_resp_in_0_f2_0_predicted_pc_valid_0 = io_resp_in_0_f2_0_predicted_pc_valid; // @[tage.scala:209:7] wire [39:0] io_resp_in_0_f2_0_predicted_pc_bits_0 = io_resp_in_0_f2_0_predicted_pc_bits; // @[tage.scala:209:7] wire io_resp_in_0_f2_1_taken_0 = io_resp_in_0_f2_1_taken; // @[tage.scala:209:7] wire io_resp_in_0_f2_1_is_br_0 = io_resp_in_0_f2_1_is_br; // @[tage.scala:209:7] wire io_resp_in_0_f2_1_is_jal_0 = io_resp_in_0_f2_1_is_jal; // @[tage.scala:209:7] wire io_resp_in_0_f2_1_predicted_pc_valid_0 = io_resp_in_0_f2_1_predicted_pc_valid; // @[tage.scala:209:7] wire [39:0] io_resp_in_0_f2_1_predicted_pc_bits_0 = io_resp_in_0_f2_1_predicted_pc_bits; // @[tage.scala:209:7] wire io_resp_in_0_f2_2_taken_0 = io_resp_in_0_f2_2_taken; // @[tage.scala:209:7] wire io_resp_in_0_f2_2_is_br_0 = io_resp_in_0_f2_2_is_br; // @[tage.scala:209:7] wire io_resp_in_0_f2_2_is_jal_0 = io_resp_in_0_f2_2_is_jal; // @[tage.scala:209:7] wire 
io_resp_in_0_f2_2_predicted_pc_valid_0 = io_resp_in_0_f2_2_predicted_pc_valid; // @[tage.scala:209:7] wire [39:0] io_resp_in_0_f2_2_predicted_pc_bits_0 = io_resp_in_0_f2_2_predicted_pc_bits; // @[tage.scala:209:7] wire io_resp_in_0_f2_3_taken_0 = io_resp_in_0_f2_3_taken; // @[tage.scala:209:7] wire io_resp_in_0_f2_3_is_br_0 = io_resp_in_0_f2_3_is_br; // @[tage.scala:209:7] wire io_resp_in_0_f2_3_is_jal_0 = io_resp_in_0_f2_3_is_jal; // @[tage.scala:209:7] wire io_resp_in_0_f2_3_predicted_pc_valid_0 = io_resp_in_0_f2_3_predicted_pc_valid; // @[tage.scala:209:7] wire [39:0] io_resp_in_0_f2_3_predicted_pc_bits_0 = io_resp_in_0_f2_3_predicted_pc_bits; // @[tage.scala:209:7] wire io_resp_in_0_f3_0_taken_0 = io_resp_in_0_f3_0_taken; // @[tage.scala:209:7] wire io_resp_in_0_f3_0_is_br_0 = io_resp_in_0_f3_0_is_br; // @[tage.scala:209:7] wire io_resp_in_0_f3_0_is_jal_0 = io_resp_in_0_f3_0_is_jal; // @[tage.scala:209:7] wire io_resp_in_0_f3_0_predicted_pc_valid_0 = io_resp_in_0_f3_0_predicted_pc_valid; // @[tage.scala:209:7] wire [39:0] io_resp_in_0_f3_0_predicted_pc_bits_0 = io_resp_in_0_f3_0_predicted_pc_bits; // @[tage.scala:209:7] wire io_resp_in_0_f3_1_taken_0 = io_resp_in_0_f3_1_taken; // @[tage.scala:209:7] wire io_resp_in_0_f3_1_is_br_0 = io_resp_in_0_f3_1_is_br; // @[tage.scala:209:7] wire io_resp_in_0_f3_1_is_jal_0 = io_resp_in_0_f3_1_is_jal; // @[tage.scala:209:7] wire io_resp_in_0_f3_1_predicted_pc_valid_0 = io_resp_in_0_f3_1_predicted_pc_valid; // @[tage.scala:209:7] wire [39:0] io_resp_in_0_f3_1_predicted_pc_bits_0 = io_resp_in_0_f3_1_predicted_pc_bits; // @[tage.scala:209:7] wire io_resp_in_0_f3_2_taken_0 = io_resp_in_0_f3_2_taken; // @[tage.scala:209:7] wire io_resp_in_0_f3_2_is_br_0 = io_resp_in_0_f3_2_is_br; // @[tage.scala:209:7] wire io_resp_in_0_f3_2_is_jal_0 = io_resp_in_0_f3_2_is_jal; // @[tage.scala:209:7] wire io_resp_in_0_f3_2_predicted_pc_valid_0 = io_resp_in_0_f3_2_predicted_pc_valid; // @[tage.scala:209:7] wire [39:0] io_resp_in_0_f3_2_predicted_pc_bits_0 = io_resp_in_0_f3_2_predicted_pc_bits; // @[tage.scala:209:7] wire io_resp_in_0_f3_3_taken_0 = io_resp_in_0_f3_3_taken; // @[tage.scala:209:7] wire io_resp_in_0_f3_3_is_br_0 = io_resp_in_0_f3_3_is_br; // @[tage.scala:209:7] wire io_resp_in_0_f3_3_is_jal_0 = io_resp_in_0_f3_3_is_jal; // @[tage.scala:209:7] wire io_resp_in_0_f3_3_predicted_pc_valid_0 = io_resp_in_0_f3_3_predicted_pc_valid; // @[tage.scala:209:7] wire [39:0] io_resp_in_0_f3_3_predicted_pc_bits_0 = io_resp_in_0_f3_3_predicted_pc_bits; // @[tage.scala:209:7] wire io_f3_fire_0 = io_f3_fire; // @[tage.scala:209:7] wire io_update_valid_0 = io_update_valid; // @[tage.scala:209:7] wire io_update_bits_is_mispredict_update_0 = io_update_bits_is_mispredict_update; // @[tage.scala:209:7] wire io_update_bits_is_repair_update_0 = io_update_bits_is_repair_update; // @[tage.scala:209:7] wire [3:0] io_update_bits_btb_mispredicts_0 = io_update_bits_btb_mispredicts; // @[tage.scala:209:7] wire [39:0] io_update_bits_pc_0 = io_update_bits_pc; // @[tage.scala:209:7] wire [3:0] io_update_bits_br_mask_0 = io_update_bits_br_mask; // @[tage.scala:209:7] wire io_update_bits_cfi_idx_valid_0 = io_update_bits_cfi_idx_valid; // @[tage.scala:209:7] wire [1:0] io_update_bits_cfi_idx_bits_0 = io_update_bits_cfi_idx_bits; // @[tage.scala:209:7] wire io_update_bits_cfi_taken_0 = io_update_bits_cfi_taken; // @[tage.scala:209:7] wire io_update_bits_cfi_mispredicted_0 = io_update_bits_cfi_mispredicted; // @[tage.scala:209:7] wire io_update_bits_cfi_is_br_0 = io_update_bits_cfi_is_br; // 
@[tage.scala:209:7] wire io_update_bits_cfi_is_jal_0 = io_update_bits_cfi_is_jal; // @[tage.scala:209:7] wire io_update_bits_cfi_is_jalr_0 = io_update_bits_cfi_is_jalr; // @[tage.scala:209:7] wire [63:0] io_update_bits_ghist_0 = io_update_bits_ghist; // @[tage.scala:209:7] wire io_update_bits_lhist_0 = io_update_bits_lhist; // @[tage.scala:209:7] wire [39:0] io_update_bits_target_0 = io_update_bits_target; // @[tage.scala:209:7] wire [119:0] io_update_bits_meta_0 = io_update_bits_meta; // @[tage.scala:209:7] wire io_f1_lhist = 1'h0; // @[tage.scala:209:7] wire _s1_update_mask_WIRE_0_0 = 1'h0; // @[tage.scala:252:48] wire _s1_update_mask_WIRE_0_1 = 1'h0; // @[tage.scala:252:48] wire _s1_update_mask_WIRE_0_2 = 1'h0; // @[tage.scala:252:48] wire _s1_update_mask_WIRE_0_3 = 1'h0; // @[tage.scala:252:48] wire _s1_update_mask_WIRE_1_0 = 1'h0; // @[tage.scala:252:48] wire _s1_update_mask_WIRE_1_1 = 1'h0; // @[tage.scala:252:48] wire _s1_update_mask_WIRE_1_2 = 1'h0; // @[tage.scala:252:48] wire _s1_update_mask_WIRE_1_3 = 1'h0; // @[tage.scala:252:48] wire _s1_update_mask_WIRE_2_0 = 1'h0; // @[tage.scala:252:48] wire _s1_update_mask_WIRE_2_1 = 1'h0; // @[tage.scala:252:48] wire _s1_update_mask_WIRE_2_2 = 1'h0; // @[tage.scala:252:48] wire _s1_update_mask_WIRE_2_3 = 1'h0; // @[tage.scala:252:48] wire _s1_update_mask_WIRE_3_0 = 1'h0; // @[tage.scala:252:48] wire _s1_update_mask_WIRE_3_1 = 1'h0; // @[tage.scala:252:48] wire _s1_update_mask_WIRE_3_2 = 1'h0; // @[tage.scala:252:48] wire _s1_update_mask_WIRE_3_3 = 1'h0; // @[tage.scala:252:48] wire _s1_update_mask_WIRE_4_0 = 1'h0; // @[tage.scala:252:48] wire _s1_update_mask_WIRE_4_1 = 1'h0; // @[tage.scala:252:48] wire _s1_update_mask_WIRE_4_2 = 1'h0; // @[tage.scala:252:48] wire _s1_update_mask_WIRE_4_3 = 1'h0; // @[tage.scala:252:48] wire _s1_update_mask_WIRE_5_0 = 1'h0; // @[tage.scala:252:48] wire _s1_update_mask_WIRE_5_1 = 1'h0; // @[tage.scala:252:48] wire _s1_update_mask_WIRE_5_2 = 1'h0; // @[tage.scala:252:48] wire _s1_update_mask_WIRE_5_3 = 1'h0; // @[tage.scala:252:48] wire _s1_update_u_mask_WIRE_0_0 = 1'h0; // @[tage.scala:253:50] wire _s1_update_u_mask_WIRE_0_1 = 1'h0; // @[tage.scala:253:50] wire _s1_update_u_mask_WIRE_0_2 = 1'h0; // @[tage.scala:253:50] wire _s1_update_u_mask_WIRE_0_3 = 1'h0; // @[tage.scala:253:50] wire _s1_update_u_mask_WIRE_1_0 = 1'h0; // @[tage.scala:253:50] wire _s1_update_u_mask_WIRE_1_1 = 1'h0; // @[tage.scala:253:50] wire _s1_update_u_mask_WIRE_1_2 = 1'h0; // @[tage.scala:253:50] wire _s1_update_u_mask_WIRE_1_3 = 1'h0; // @[tage.scala:253:50] wire _s1_update_u_mask_WIRE_2_0 = 1'h0; // @[tage.scala:253:50] wire _s1_update_u_mask_WIRE_2_1 = 1'h0; // @[tage.scala:253:50] wire _s1_update_u_mask_WIRE_2_2 = 1'h0; // @[tage.scala:253:50] wire _s1_update_u_mask_WIRE_2_3 = 1'h0; // @[tage.scala:253:50] wire _s1_update_u_mask_WIRE_3_0 = 1'h0; // @[tage.scala:253:50] wire _s1_update_u_mask_WIRE_3_1 = 1'h0; // @[tage.scala:253:50] wire _s1_update_u_mask_WIRE_3_2 = 1'h0; // @[tage.scala:253:50] wire _s1_update_u_mask_WIRE_3_3 = 1'h0; // @[tage.scala:253:50] wire _s1_update_u_mask_WIRE_4_0 = 1'h0; // @[tage.scala:253:50] wire _s1_update_u_mask_WIRE_4_1 = 1'h0; // @[tage.scala:253:50] wire _s1_update_u_mask_WIRE_4_2 = 1'h0; // @[tage.scala:253:50] wire _s1_update_u_mask_WIRE_4_3 = 1'h0; // @[tage.scala:253:50] wire _s1_update_u_mask_WIRE_5_0 = 1'h0; // @[tage.scala:253:50] wire _s1_update_u_mask_WIRE_5_1 = 1'h0; // @[tage.scala:253:50] wire _s1_update_u_mask_WIRE_5_2 = 1'h0; // @[tage.scala:253:50] wire _s1_update_u_mask_WIRE_5_3 
= 1'h0; // @[tage.scala:253:50] wire io_resp_f1_0_taken_0 = io_resp_in_0_f1_0_taken_0; // @[tage.scala:209:7] wire io_resp_f1_0_is_br_0 = io_resp_in_0_f1_0_is_br_0; // @[tage.scala:209:7] wire io_resp_f1_0_is_jal_0 = io_resp_in_0_f1_0_is_jal_0; // @[tage.scala:209:7] wire io_resp_f1_0_predicted_pc_valid_0 = io_resp_in_0_f1_0_predicted_pc_valid_0; // @[tage.scala:209:7] wire [39:0] io_resp_f1_0_predicted_pc_bits_0 = io_resp_in_0_f1_0_predicted_pc_bits_0; // @[tage.scala:209:7] wire io_resp_f1_1_taken_0 = io_resp_in_0_f1_1_taken_0; // @[tage.scala:209:7] wire io_resp_f1_1_is_br_0 = io_resp_in_0_f1_1_is_br_0; // @[tage.scala:209:7] wire io_resp_f1_1_is_jal_0 = io_resp_in_0_f1_1_is_jal_0; // @[tage.scala:209:7] wire io_resp_f1_1_predicted_pc_valid_0 = io_resp_in_0_f1_1_predicted_pc_valid_0; // @[tage.scala:209:7] wire [39:0] io_resp_f1_1_predicted_pc_bits_0 = io_resp_in_0_f1_1_predicted_pc_bits_0; // @[tage.scala:209:7] wire io_resp_f1_2_taken_0 = io_resp_in_0_f1_2_taken_0; // @[tage.scala:209:7] wire io_resp_f1_2_is_br_0 = io_resp_in_0_f1_2_is_br_0; // @[tage.scala:209:7] wire io_resp_f1_2_is_jal_0 = io_resp_in_0_f1_2_is_jal_0; // @[tage.scala:209:7] wire io_resp_f1_2_predicted_pc_valid_0 = io_resp_in_0_f1_2_predicted_pc_valid_0; // @[tage.scala:209:7] wire [39:0] io_resp_f1_2_predicted_pc_bits_0 = io_resp_in_0_f1_2_predicted_pc_bits_0; // @[tage.scala:209:7] wire io_resp_f1_3_taken_0 = io_resp_in_0_f1_3_taken_0; // @[tage.scala:209:7] wire io_resp_f1_3_is_br_0 = io_resp_in_0_f1_3_is_br_0; // @[tage.scala:209:7] wire io_resp_f1_3_is_jal_0 = io_resp_in_0_f1_3_is_jal_0; // @[tage.scala:209:7] wire io_resp_f1_3_predicted_pc_valid_0 = io_resp_in_0_f1_3_predicted_pc_valid_0; // @[tage.scala:209:7] wire [39:0] io_resp_f1_3_predicted_pc_bits_0 = io_resp_in_0_f1_3_predicted_pc_bits_0; // @[tage.scala:209:7] wire io_resp_f2_0_taken_0 = io_resp_in_0_f2_0_taken_0; // @[tage.scala:209:7] wire io_resp_f2_0_is_br_0 = io_resp_in_0_f2_0_is_br_0; // @[tage.scala:209:7] wire io_resp_f2_0_is_jal_0 = io_resp_in_0_f2_0_is_jal_0; // @[tage.scala:209:7] wire io_resp_f2_0_predicted_pc_valid_0 = io_resp_in_0_f2_0_predicted_pc_valid_0; // @[tage.scala:209:7] wire [39:0] io_resp_f2_0_predicted_pc_bits_0 = io_resp_in_0_f2_0_predicted_pc_bits_0; // @[tage.scala:209:7] wire io_resp_f2_1_taken_0 = io_resp_in_0_f2_1_taken_0; // @[tage.scala:209:7] wire io_resp_f2_1_is_br_0 = io_resp_in_0_f2_1_is_br_0; // @[tage.scala:209:7] wire io_resp_f2_1_is_jal_0 = io_resp_in_0_f2_1_is_jal_0; // @[tage.scala:209:7] wire io_resp_f2_1_predicted_pc_valid_0 = io_resp_in_0_f2_1_predicted_pc_valid_0; // @[tage.scala:209:7] wire [39:0] io_resp_f2_1_predicted_pc_bits_0 = io_resp_in_0_f2_1_predicted_pc_bits_0; // @[tage.scala:209:7] wire io_resp_f2_2_taken_0 = io_resp_in_0_f2_2_taken_0; // @[tage.scala:209:7] wire io_resp_f2_2_is_br_0 = io_resp_in_0_f2_2_is_br_0; // @[tage.scala:209:7] wire io_resp_f2_2_is_jal_0 = io_resp_in_0_f2_2_is_jal_0; // @[tage.scala:209:7] wire io_resp_f2_2_predicted_pc_valid_0 = io_resp_in_0_f2_2_predicted_pc_valid_0; // @[tage.scala:209:7] wire [39:0] io_resp_f2_2_predicted_pc_bits_0 = io_resp_in_0_f2_2_predicted_pc_bits_0; // @[tage.scala:209:7] wire io_resp_f2_3_taken_0 = io_resp_in_0_f2_3_taken_0; // @[tage.scala:209:7] wire io_resp_f2_3_is_br_0 = io_resp_in_0_f2_3_is_br_0; // @[tage.scala:209:7] wire io_resp_f2_3_is_jal_0 = io_resp_in_0_f2_3_is_jal_0; // @[tage.scala:209:7] wire io_resp_f2_3_predicted_pc_valid_0 = io_resp_in_0_f2_3_predicted_pc_valid_0; // @[tage.scala:209:7] wire [39:0] 
io_resp_f2_3_predicted_pc_bits_0 = io_resp_in_0_f2_3_predicted_pc_bits_0; // @[tage.scala:209:7] wire io_resp_f3_0_is_br_0 = io_resp_in_0_f3_0_is_br_0; // @[tage.scala:209:7] wire io_resp_f3_0_is_jal_0 = io_resp_in_0_f3_0_is_jal_0; // @[tage.scala:209:7] wire io_resp_f3_0_predicted_pc_valid_0 = io_resp_in_0_f3_0_predicted_pc_valid_0; // @[tage.scala:209:7] wire [39:0] io_resp_f3_0_predicted_pc_bits_0 = io_resp_in_0_f3_0_predicted_pc_bits_0; // @[tage.scala:209:7] wire io_resp_f3_1_is_br_0 = io_resp_in_0_f3_1_is_br_0; // @[tage.scala:209:7] wire io_resp_f3_1_is_jal_0 = io_resp_in_0_f3_1_is_jal_0; // @[tage.scala:209:7] wire io_resp_f3_1_predicted_pc_valid_0 = io_resp_in_0_f3_1_predicted_pc_valid_0; // @[tage.scala:209:7] wire [39:0] io_resp_f3_1_predicted_pc_bits_0 = io_resp_in_0_f3_1_predicted_pc_bits_0; // @[tage.scala:209:7] wire io_resp_f3_2_is_br_0 = io_resp_in_0_f3_2_is_br_0; // @[tage.scala:209:7] wire io_resp_f3_2_is_jal_0 = io_resp_in_0_f3_2_is_jal_0; // @[tage.scala:209:7] wire io_resp_f3_2_predicted_pc_valid_0 = io_resp_in_0_f3_2_predicted_pc_valid_0; // @[tage.scala:209:7] wire [39:0] io_resp_f3_2_predicted_pc_bits_0 = io_resp_in_0_f3_2_predicted_pc_bits_0; // @[tage.scala:209:7] wire io_resp_f3_3_is_br_0 = io_resp_in_0_f3_3_is_br_0; // @[tage.scala:209:7] wire io_resp_f3_3_is_jal_0 = io_resp_in_0_f3_3_is_jal_0; // @[tage.scala:209:7] wire io_resp_f3_3_predicted_pc_valid_0 = io_resp_in_0_f3_3_predicted_pc_valid_0; // @[tage.scala:209:7] wire [39:0] io_resp_f3_3_predicted_pc_bits_0 = io_resp_in_0_f3_3_predicted_pc_bits_0; // @[tage.scala:209:7] wire _io_resp_f3_0_taken_T_7; // @[tage.scala:287:31] wire _io_resp_f3_1_taken_T_7; // @[tage.scala:287:31] wire _io_resp_f3_2_taken_T_7; // @[tage.scala:287:31] wire _io_resp_f3_3_taken_T_7; // @[tage.scala:287:31] wire io_resp_f3_0_taken_0; // @[tage.scala:209:7] wire io_resp_f3_1_taken_0; // @[tage.scala:209:7] wire io_resp_f3_2_taken_0; // @[tage.scala:209:7] wire io_resp_f3_3_taken_0; // @[tage.scala:209:7] wire [119:0] io_f3_meta_0; // @[tage.scala:209:7] wire [35:0] s0_idx = io_f0_pc_0[39:4]; // @[frontend.scala:149:35] reg [35:0] s1_idx; // @[predictor.scala:163:29] reg [35:0] s2_idx; // @[predictor.scala:164:29] reg [35:0] s3_idx; // @[predictor.scala:165:29] reg s1_valid; // @[predictor.scala:168:25] reg s2_valid; // @[predictor.scala:169:25] reg s3_valid; // @[predictor.scala:170:25] reg [3:0] s1_mask; // @[predictor.scala:173:24] reg [3:0] s2_mask; // @[predictor.scala:174:24] reg [3:0] s3_mask; // @[predictor.scala:175:24] wire [39:0] _s0_pc_T = ~io_f0_pc_0; // @[frontend.scala:147:33] wire [39:0] _s0_pc_T_1 = {_s0_pc_T[39:3], 3'h7}; // @[frontend.scala:147:{33,39}] wire [39:0] s0_pc = ~_s0_pc_T_1; // @[frontend.scala:147:{31,39}] reg [39:0] s1_pc; // @[predictor.scala:178:22] reg [39:0] s2_pc; // @[predictor.scala:179:22] wire [35:0] s0_update_idx = io_update_bits_pc_0[39:4]; // @[frontend.scala:149:35] reg s1_update_valid; // @[predictor.scala:185:30] reg s1_update_bits_is_mispredict_update; // @[predictor.scala:185:30] reg s1_update_bits_is_repair_update; // @[predictor.scala:185:30] reg [3:0] s1_update_bits_btb_mispredicts; // @[predictor.scala:185:30] reg [39:0] s1_update_bits_pc; // @[predictor.scala:185:30] reg [3:0] s1_update_bits_br_mask; // @[predictor.scala:185:30] reg s1_update_bits_cfi_idx_valid; // @[predictor.scala:185:30] reg [1:0] s1_update_bits_cfi_idx_bits; // @[predictor.scala:185:30] reg s1_update_bits_cfi_taken; // @[predictor.scala:185:30] reg s1_update_bits_cfi_mispredicted; // @[predictor.scala:185:30] 
reg s1_update_bits_cfi_is_br; // @[predictor.scala:185:30] reg s1_update_bits_cfi_is_jal; // @[predictor.scala:185:30] reg s1_update_bits_cfi_is_jalr; // @[predictor.scala:185:30] reg [63:0] s1_update_bits_ghist; // @[predictor.scala:185:30] reg s1_update_bits_lhist; // @[predictor.scala:185:30] reg [39:0] s1_update_bits_target; // @[predictor.scala:185:30] reg [119:0] s1_update_bits_meta; // @[predictor.scala:185:30] wire [39:0] _s1_update_bits_pc_T = ~io_update_bits_pc_0; // @[frontend.scala:147:33] wire [39:0] _s1_update_bits_pc_T_1 = {_s1_update_bits_pc_T[39:3], 3'h7}; // @[frontend.scala:147:{33,39}] wire [39:0] _s1_update_bits_pc_T_2 = ~_s1_update_bits_pc_T_1; // @[frontend.scala:147:{31,39}] reg [35:0] s1_update_idx; // @[predictor.scala:187:30] reg s1_update_valid_0; // @[predictor.scala:188:32] wire _f3_meta_alt_differs_0_T_2; // @[tage.scala:295:50] wire _f3_meta_alt_differs_1_T_2; // @[tage.scala:295:50] wire _f3_meta_alt_differs_2_T_2; // @[tage.scala:295:50] wire _f3_meta_alt_differs_3_T_2; // @[tage.scala:295:50] wire _f3_meta_allocate_0_valid_T; // @[tage.scala:313:52] wire [2:0] alloc_entry; // @[tage.scala:309:26] wire _f3_meta_allocate_1_valid_T; // @[tage.scala:313:52] wire [2:0] alloc_entry_1; // @[tage.scala:309:26] wire _f3_meta_allocate_2_valid_T; // @[tage.scala:313:52] wire [2:0] alloc_entry_2; // @[tage.scala:309:26] wire _f3_meta_allocate_3_valid_T; // @[tage.scala:313:52] wire [2:0] alloc_entry_3; // @[tage.scala:309:26] wire f3_meta_provider_0_valid; // @[tage.scala:223:21] wire [2:0] f3_meta_provider_0_bits; // @[tage.scala:223:21] wire f3_meta_provider_1_valid; // @[tage.scala:223:21] wire [2:0] f3_meta_provider_1_bits; // @[tage.scala:223:21] wire f3_meta_provider_2_valid; // @[tage.scala:223:21] wire [2:0] f3_meta_provider_2_bits; // @[tage.scala:223:21] wire f3_meta_provider_3_valid; // @[tage.scala:223:21] wire [2:0] f3_meta_provider_3_bits; // @[tage.scala:223:21] wire f3_meta_alt_differs_0; // @[tage.scala:223:21] wire f3_meta_alt_differs_1; // @[tage.scala:223:21] wire f3_meta_alt_differs_2; // @[tage.scala:223:21] wire f3_meta_alt_differs_3; // @[tage.scala:223:21] wire [1:0] f3_meta_provider_u_0; // @[tage.scala:223:21] wire [1:0] f3_meta_provider_u_1; // @[tage.scala:223:21] wire [1:0] f3_meta_provider_u_2; // @[tage.scala:223:21] wire [1:0] f3_meta_provider_u_3; // @[tage.scala:223:21] wire [2:0] f3_meta_provider_ctr_0; // @[tage.scala:223:21] wire [2:0] f3_meta_provider_ctr_1; // @[tage.scala:223:21] wire [2:0] f3_meta_provider_ctr_2; // @[tage.scala:223:21] wire [2:0] f3_meta_provider_ctr_3; // @[tage.scala:223:21] wire f3_meta_allocate_0_valid; // @[tage.scala:223:21] wire [2:0] f3_meta_allocate_0_bits; // @[tage.scala:223:21] wire f3_meta_allocate_1_valid; // @[tage.scala:223:21] wire [2:0] f3_meta_allocate_1_bits; // @[tage.scala:223:21] wire f3_meta_allocate_2_valid; // @[tage.scala:223:21] wire [2:0] f3_meta_allocate_2_bits; // @[tage.scala:223:21] wire f3_meta_allocate_3_valid; // @[tage.scala:223:21] wire [2:0] f3_meta_allocate_3_bits; // @[tage.scala:223:21] wire [3:0] _io_f3_meta_T = {f3_meta_allocate_0_valid, f3_meta_allocate_0_bits}; // @[tage.scala:223:21, :224:33, :379:25] wire [3:0] _io_f3_meta_T_1 = {f3_meta_allocate_1_valid, f3_meta_allocate_1_bits}; // @[tage.scala:223:21, :224:33, :379:25] wire [3:0] _io_f3_meta_T_2 = {f3_meta_allocate_2_valid, f3_meta_allocate_2_bits}; // @[tage.scala:223:21, :224:33, :379:25] wire [3:0] _io_f3_meta_T_3 = {f3_meta_allocate_3_valid, f3_meta_allocate_3_bits}; // @[tage.scala:223:21, :224:33, 
:379:25] wire [7:0] lo = {f3_meta_allocate_1_valid, f3_meta_allocate_1_bits, f3_meta_allocate_0_valid, f3_meta_allocate_0_bits}; // @[tage.scala:223:21, :224:33] wire [7:0] hi = {f3_meta_allocate_3_valid, f3_meta_allocate_3_bits, f3_meta_allocate_2_valid, f3_meta_allocate_2_bits}; // @[tage.scala:223:21, :224:33] wire [5:0] _GEN = {f3_meta_provider_ctr_1, f3_meta_provider_ctr_0}; // @[tage.scala:223:21, :224:33] wire [5:0] lo_1; // @[tage.scala:224:33] assign lo_1 = _GEN; // @[tage.scala:224:33] wire [5:0] io_f3_meta_lo_1; // @[tage.scala:379:25] assign io_f3_meta_lo_1 = _GEN; // @[tage.scala:224:33, :379:25] wire [5:0] _GEN_0 = {f3_meta_provider_ctr_3, f3_meta_provider_ctr_2}; // @[tage.scala:223:21, :224:33] wire [5:0] hi_1; // @[tage.scala:224:33] assign hi_1 = _GEN_0; // @[tage.scala:224:33] wire [5:0] io_f3_meta_hi_1; // @[tage.scala:379:25] assign io_f3_meta_hi_1 = _GEN_0; // @[tage.scala:224:33, :379:25] wire [3:0] _GEN_1 = {f3_meta_provider_u_1, f3_meta_provider_u_0}; // @[tage.scala:223:21, :224:33] wire [3:0] lo_2; // @[tage.scala:224:33] assign lo_2 = _GEN_1; // @[tage.scala:224:33] wire [3:0] io_f3_meta_lo_2; // @[tage.scala:379:25] assign io_f3_meta_lo_2 = _GEN_1; // @[tage.scala:224:33, :379:25] wire [3:0] _GEN_2 = {f3_meta_provider_u_3, f3_meta_provider_u_2}; // @[tage.scala:223:21, :224:33] wire [3:0] hi_2; // @[tage.scala:224:33] assign hi_2 = _GEN_2; // @[tage.scala:224:33] wire [3:0] io_f3_meta_hi_2; // @[tage.scala:379:25] assign io_f3_meta_hi_2 = _GEN_2; // @[tage.scala:224:33, :379:25] wire [1:0] _GEN_3 = {f3_meta_alt_differs_1, f3_meta_alt_differs_0}; // @[tage.scala:223:21, :224:33] wire [1:0] lo_3; // @[tage.scala:224:33] assign lo_3 = _GEN_3; // @[tage.scala:224:33] wire [1:0] io_f3_meta_lo_3; // @[tage.scala:379:25] assign io_f3_meta_lo_3 = _GEN_3; // @[tage.scala:224:33, :379:25] wire [1:0] _GEN_4 = {f3_meta_alt_differs_3, f3_meta_alt_differs_2}; // @[tage.scala:223:21, :224:33] wire [1:0] hi_3; // @[tage.scala:224:33] assign hi_3 = _GEN_4; // @[tage.scala:224:33] wire [1:0] io_f3_meta_hi_3; // @[tage.scala:379:25] assign io_f3_meta_hi_3 = _GEN_4; // @[tage.scala:224:33, :379:25] wire [3:0] _io_f3_meta_T_8 = {f3_meta_provider_0_valid, f3_meta_provider_0_bits}; // @[tage.scala:223:21, :224:33, :379:25] wire [3:0] _io_f3_meta_T_9 = {f3_meta_provider_1_valid, f3_meta_provider_1_bits}; // @[tage.scala:223:21, :224:33, :379:25] wire [3:0] _io_f3_meta_T_10 = {f3_meta_provider_2_valid, f3_meta_provider_2_bits}; // @[tage.scala:223:21, :224:33, :379:25] wire [3:0] _io_f3_meta_T_11 = {f3_meta_provider_3_valid, f3_meta_provider_3_bits}; // @[tage.scala:223:21, :224:33, :379:25] wire [7:0] lo_4 = {f3_meta_provider_1_valid, f3_meta_provider_1_bits, f3_meta_provider_0_valid, f3_meta_provider_0_bits}; // @[tage.scala:223:21, :224:33] wire [7:0] hi_4 = {f3_meta_provider_3_valid, f3_meta_provider_3_bits, f3_meta_provider_2_valid, f3_meta_provider_2_bits}; // @[tage.scala:223:21, :224:33] wire [27:0] lo_5 = {hi_1, lo_1, hi, lo}; // @[tage.scala:224:33] wire [19:0] hi_hi = {hi_4, lo_4, hi_3, lo_3}; // @[tage.scala:224:33] wire [27:0] hi_5 = {hi_hi, hi_2, lo_2}; // @[tage.scala:224:33] reg t_io_f1_req_valid_REG; // @[tage.scala:236:35] wire [39:0] _t_io_f1_req_pc_T = ~io_f0_pc_0; // @[frontend.scala:147:33] wire [39:0] _t_io_f1_req_pc_T_1 = {_t_io_f1_req_pc_T[39:3], 3'h7}; // @[frontend.scala:147:{33,39}] wire [39:0] _t_io_f1_req_pc_T_2 = ~_t_io_f1_req_pc_T_1; // @[frontend.scala:147:{31,39}] reg [39:0] t_io_f1_req_pc_REG; // @[tage.scala:237:35] reg t_io_f1_req_valid_REG_1; // 
@[tage.scala:236:35] wire [39:0] _t_io_f1_req_pc_T_3 = ~io_f0_pc_0; // @[frontend.scala:147:33] wire [39:0] _t_io_f1_req_pc_T_4 = {_t_io_f1_req_pc_T_3[39:3], 3'h7}; // @[frontend.scala:147:{33,39}] wire [39:0] _t_io_f1_req_pc_T_5 = ~_t_io_f1_req_pc_T_4; // @[frontend.scala:147:{31,39}] reg [39:0] t_io_f1_req_pc_REG_1; // @[tage.scala:237:35] reg t_io_f1_req_valid_REG_2; // @[tage.scala:236:35] wire [39:0] _t_io_f1_req_pc_T_6 = ~io_f0_pc_0; // @[frontend.scala:147:33] wire [39:0] _t_io_f1_req_pc_T_7 = {_t_io_f1_req_pc_T_6[39:3], 3'h7}; // @[frontend.scala:147:{33,39}] wire [39:0] _t_io_f1_req_pc_T_8 = ~_t_io_f1_req_pc_T_7; // @[frontend.scala:147:{31,39}] reg [39:0] t_io_f1_req_pc_REG_2; // @[tage.scala:237:35] reg t_io_f1_req_valid_REG_3; // @[tage.scala:236:35] wire [39:0] _t_io_f1_req_pc_T_9 = ~io_f0_pc_0; // @[frontend.scala:147:33] wire [39:0] _t_io_f1_req_pc_T_10 = {_t_io_f1_req_pc_T_9[39:3], 3'h7}; // @[frontend.scala:147:{33,39}] wire [39:0] _t_io_f1_req_pc_T_11 = ~_t_io_f1_req_pc_T_10; // @[frontend.scala:147:{31,39}] reg [39:0] t_io_f1_req_pc_REG_3; // @[tage.scala:237:35] reg t_io_f1_req_valid_REG_4; // @[tage.scala:236:35] wire [39:0] _t_io_f1_req_pc_T_12 = ~io_f0_pc_0; // @[frontend.scala:147:33] wire [39:0] _t_io_f1_req_pc_T_13 = {_t_io_f1_req_pc_T_12[39:3], 3'h7}; // @[frontend.scala:147:{33,39}] wire [39:0] _t_io_f1_req_pc_T_14 = ~_t_io_f1_req_pc_T_13; // @[frontend.scala:147:{31,39}] reg [39:0] t_io_f1_req_pc_REG_4; // @[tage.scala:237:35] reg t_io_f1_req_valid_REG_5; // @[tage.scala:236:35] wire [39:0] _t_io_f1_req_pc_T_15 = ~io_f0_pc_0; // @[frontend.scala:147:33] wire [39:0] _t_io_f1_req_pc_T_16 = {_t_io_f1_req_pc_T_15[39:3], 3'h7}; // @[frontend.scala:147:{33,39}] wire [39:0] _t_io_f1_req_pc_T_17 = ~_t_io_f1_req_pc_T_16; // @[frontend.scala:147:{31,39}] reg [39:0] t_io_f1_req_pc_REG_5; // @[tage.scala:237:35] wire [2:0] f2_resps_0_0_bits_ctr; // @[tage.scala:245:25] wire [1:0] f2_resps_0_0_bits_u; // @[tage.scala:245:25] wire f2_resps_0_0_valid; // @[tage.scala:245:25] wire [2:0] f2_resps_0_1_bits_ctr; // @[tage.scala:245:25] wire [1:0] f2_resps_0_1_bits_u; // @[tage.scala:245:25] wire f2_resps_0_1_valid; // @[tage.scala:245:25] wire [2:0] f2_resps_0_2_bits_ctr; // @[tage.scala:245:25] wire [1:0] f2_resps_0_2_bits_u; // @[tage.scala:245:25] wire f2_resps_0_2_valid; // @[tage.scala:245:25] wire [2:0] f2_resps_0_3_bits_ctr; // @[tage.scala:245:25] wire [1:0] f2_resps_0_3_bits_u; // @[tage.scala:245:25] wire f2_resps_0_3_valid; // @[tage.scala:245:25] wire [2:0] f2_resps_1_0_bits_ctr; // @[tage.scala:245:25] wire [1:0] f2_resps_1_0_bits_u; // @[tage.scala:245:25] wire f2_resps_1_0_valid; // @[tage.scala:245:25] wire [2:0] f2_resps_1_1_bits_ctr; // @[tage.scala:245:25] wire [1:0] f2_resps_1_1_bits_u; // @[tage.scala:245:25] wire f2_resps_1_1_valid; // @[tage.scala:245:25] wire [2:0] f2_resps_1_2_bits_ctr; // @[tage.scala:245:25] wire [1:0] f2_resps_1_2_bits_u; // @[tage.scala:245:25] wire f2_resps_1_2_valid; // @[tage.scala:245:25] wire [2:0] f2_resps_1_3_bits_ctr; // @[tage.scala:245:25] wire [1:0] f2_resps_1_3_bits_u; // @[tage.scala:245:25] wire f2_resps_1_3_valid; // @[tage.scala:245:25] wire [2:0] f2_resps_2_0_bits_ctr; // @[tage.scala:245:25] wire [1:0] f2_resps_2_0_bits_u; // @[tage.scala:245:25] wire f2_resps_2_0_valid; // @[tage.scala:245:25] wire [2:0] f2_resps_2_1_bits_ctr; // @[tage.scala:245:25] wire [1:0] f2_resps_2_1_bits_u; // @[tage.scala:245:25] wire f2_resps_2_1_valid; // @[tage.scala:245:25] wire [2:0] f2_resps_2_2_bits_ctr; // @[tage.scala:245:25] wire 
[1:0] f2_resps_2_2_bits_u; // @[tage.scala:245:25] wire f2_resps_2_2_valid; // @[tage.scala:245:25] wire [2:0] f2_resps_2_3_bits_ctr; // @[tage.scala:245:25] wire [1:0] f2_resps_2_3_bits_u; // @[tage.scala:245:25] wire f2_resps_2_3_valid; // @[tage.scala:245:25] wire [2:0] f2_resps_3_0_bits_ctr; // @[tage.scala:245:25] wire [1:0] f2_resps_3_0_bits_u; // @[tage.scala:245:25] wire f2_resps_3_0_valid; // @[tage.scala:245:25] wire [2:0] f2_resps_3_1_bits_ctr; // @[tage.scala:245:25] wire [1:0] f2_resps_3_1_bits_u; // @[tage.scala:245:25] wire f2_resps_3_1_valid; // @[tage.scala:245:25] wire [2:0] f2_resps_3_2_bits_ctr; // @[tage.scala:245:25] wire [1:0] f2_resps_3_2_bits_u; // @[tage.scala:245:25] wire f2_resps_3_2_valid; // @[tage.scala:245:25] wire [2:0] f2_resps_3_3_bits_ctr; // @[tage.scala:245:25] wire [1:0] f2_resps_3_3_bits_u; // @[tage.scala:245:25] wire f2_resps_3_3_valid; // @[tage.scala:245:25] wire [2:0] f2_resps_4_0_bits_ctr; // @[tage.scala:245:25] wire [1:0] f2_resps_4_0_bits_u; // @[tage.scala:245:25] wire f2_resps_4_0_valid; // @[tage.scala:245:25] wire [2:0] f2_resps_4_1_bits_ctr; // @[tage.scala:245:25] wire [1:0] f2_resps_4_1_bits_u; // @[tage.scala:245:25] wire f2_resps_4_1_valid; // @[tage.scala:245:25] wire [2:0] f2_resps_4_2_bits_ctr; // @[tage.scala:245:25] wire [1:0] f2_resps_4_2_bits_u; // @[tage.scala:245:25] wire f2_resps_4_2_valid; // @[tage.scala:245:25] wire [2:0] f2_resps_4_3_bits_ctr; // @[tage.scala:245:25] wire [1:0] f2_resps_4_3_bits_u; // @[tage.scala:245:25] wire f2_resps_4_3_valid; // @[tage.scala:245:25] wire [2:0] f2_resps_5_0_bits_ctr; // @[tage.scala:245:25] wire [1:0] f2_resps_5_0_bits_u; // @[tage.scala:245:25] wire f2_resps_5_0_valid; // @[tage.scala:245:25] wire [2:0] f2_resps_5_1_bits_ctr; // @[tage.scala:245:25] wire [1:0] f2_resps_5_1_bits_u; // @[tage.scala:245:25] wire f2_resps_5_1_valid; // @[tage.scala:245:25] wire [2:0] f2_resps_5_2_bits_ctr; // @[tage.scala:245:25] wire [1:0] f2_resps_5_2_bits_u; // @[tage.scala:245:25] wire f2_resps_5_2_valid; // @[tage.scala:245:25] wire [2:0] f2_resps_5_3_bits_ctr; // @[tage.scala:245:25] wire [1:0] f2_resps_5_3_bits_u; // @[tage.scala:245:25] wire f2_resps_5_3_valid; // @[tage.scala:245:25] reg f3_resps_0_0_valid; // @[tage.scala:246:25] reg [2:0] f3_resps_0_0_bits_ctr; // @[tage.scala:246:25] reg [1:0] f3_resps_0_0_bits_u; // @[tage.scala:246:25] reg f3_resps_0_1_valid; // @[tage.scala:246:25] reg [2:0] f3_resps_0_1_bits_ctr; // @[tage.scala:246:25] reg [1:0] f3_resps_0_1_bits_u; // @[tage.scala:246:25] reg f3_resps_0_2_valid; // @[tage.scala:246:25] reg [2:0] f3_resps_0_2_bits_ctr; // @[tage.scala:246:25] reg [1:0] f3_resps_0_2_bits_u; // @[tage.scala:246:25] reg f3_resps_0_3_valid; // @[tage.scala:246:25] reg [2:0] f3_resps_0_3_bits_ctr; // @[tage.scala:246:25] reg [1:0] f3_resps_0_3_bits_u; // @[tage.scala:246:25] reg f3_resps_1_0_valid; // @[tage.scala:246:25] reg [2:0] f3_resps_1_0_bits_ctr; // @[tage.scala:246:25] reg [1:0] f3_resps_1_0_bits_u; // @[tage.scala:246:25] reg f3_resps_1_1_valid; // @[tage.scala:246:25] reg [2:0] f3_resps_1_1_bits_ctr; // @[tage.scala:246:25] reg [1:0] f3_resps_1_1_bits_u; // @[tage.scala:246:25] reg f3_resps_1_2_valid; // @[tage.scala:246:25] reg [2:0] f3_resps_1_2_bits_ctr; // @[tage.scala:246:25] reg [1:0] f3_resps_1_2_bits_u; // @[tage.scala:246:25] reg f3_resps_1_3_valid; // @[tage.scala:246:25] reg [2:0] f3_resps_1_3_bits_ctr; // @[tage.scala:246:25] reg [1:0] f3_resps_1_3_bits_u; // @[tage.scala:246:25] reg f3_resps_2_0_valid; // @[tage.scala:246:25] reg 
[2:0] f3_resps_2_0_bits_ctr; // @[tage.scala:246:25] reg [1:0] f3_resps_2_0_bits_u; // @[tage.scala:246:25] reg f3_resps_2_1_valid; // @[tage.scala:246:25] reg [2:0] f3_resps_2_1_bits_ctr; // @[tage.scala:246:25] reg [1:0] f3_resps_2_1_bits_u; // @[tage.scala:246:25] reg f3_resps_2_2_valid; // @[tage.scala:246:25] reg [2:0] f3_resps_2_2_bits_ctr; // @[tage.scala:246:25] reg [1:0] f3_resps_2_2_bits_u; // @[tage.scala:246:25] reg f3_resps_2_3_valid; // @[tage.scala:246:25] reg [2:0] f3_resps_2_3_bits_ctr; // @[tage.scala:246:25] reg [1:0] f3_resps_2_3_bits_u; // @[tage.scala:246:25] reg f3_resps_3_0_valid; // @[tage.scala:246:25] reg [2:0] f3_resps_3_0_bits_ctr; // @[tage.scala:246:25] reg [1:0] f3_resps_3_0_bits_u; // @[tage.scala:246:25] reg f3_resps_3_1_valid; // @[tage.scala:246:25] reg [2:0] f3_resps_3_1_bits_ctr; // @[tage.scala:246:25] reg [1:0] f3_resps_3_1_bits_u; // @[tage.scala:246:25] reg f3_resps_3_2_valid; // @[tage.scala:246:25] reg [2:0] f3_resps_3_2_bits_ctr; // @[tage.scala:246:25] reg [1:0] f3_resps_3_2_bits_u; // @[tage.scala:246:25] reg f3_resps_3_3_valid; // @[tage.scala:246:25] reg [2:0] f3_resps_3_3_bits_ctr; // @[tage.scala:246:25] reg [1:0] f3_resps_3_3_bits_u; // @[tage.scala:246:25] reg f3_resps_4_0_valid; // @[tage.scala:246:25] reg [2:0] f3_resps_4_0_bits_ctr; // @[tage.scala:246:25] reg [1:0] f3_resps_4_0_bits_u; // @[tage.scala:246:25] reg f3_resps_4_1_valid; // @[tage.scala:246:25] reg [2:0] f3_resps_4_1_bits_ctr; // @[tage.scala:246:25] reg [1:0] f3_resps_4_1_bits_u; // @[tage.scala:246:25] reg f3_resps_4_2_valid; // @[tage.scala:246:25] reg [2:0] f3_resps_4_2_bits_ctr; // @[tage.scala:246:25] reg [1:0] f3_resps_4_2_bits_u; // @[tage.scala:246:25] reg f3_resps_4_3_valid; // @[tage.scala:246:25] reg [2:0] f3_resps_4_3_bits_ctr; // @[tage.scala:246:25] reg [1:0] f3_resps_4_3_bits_u; // @[tage.scala:246:25] reg f3_resps_5_0_valid; // @[tage.scala:246:25] reg [2:0] f3_resps_5_0_bits_ctr; // @[tage.scala:246:25] reg [1:0] f3_resps_5_0_bits_u; // @[tage.scala:246:25] reg f3_resps_5_1_valid; // @[tage.scala:246:25] reg [2:0] f3_resps_5_1_bits_ctr; // @[tage.scala:246:25] reg [1:0] f3_resps_5_1_bits_u; // @[tage.scala:246:25] reg f3_resps_5_2_valid; // @[tage.scala:246:25] reg [2:0] f3_resps_5_2_bits_ctr; // @[tage.scala:246:25] reg [1:0] f3_resps_5_2_bits_u; // @[tage.scala:246:25] reg f3_resps_5_3_valid; // @[tage.scala:246:25] reg [2:0] f3_resps_5_3_bits_ctr; // @[tage.scala:246:25] reg [1:0] f3_resps_5_3_bits_u; // @[tage.scala:246:25] wire _s1_update_meta_T_21; // @[tage.scala:248:52] wire [2:0] _s1_update_meta_T_20; // @[tage.scala:248:52] wire _s1_update_meta_T_23; // @[tage.scala:248:52] wire [2:0] _s1_update_meta_T_22; // @[tage.scala:248:52] wire _s1_update_meta_T_25; // @[tage.scala:248:52] wire [2:0] _s1_update_meta_T_24; // @[tage.scala:248:52] wire _s1_update_meta_T_27; // @[tage.scala:248:52] wire [2:0] _s1_update_meta_T_26; // @[tage.scala:248:52] wire _s1_update_meta_T_16; // @[tage.scala:248:52] wire _s1_update_meta_T_17; // @[tage.scala:248:52] wire _s1_update_meta_T_18; // @[tage.scala:248:52] wire _s1_update_meta_T_19; // @[tage.scala:248:52] wire [1:0] _s1_update_meta_T_12; // @[tage.scala:248:52] wire [1:0] _s1_update_meta_T_13; // @[tage.scala:248:52] wire [1:0] _s1_update_meta_T_14; // @[tage.scala:248:52] wire [1:0] _s1_update_meta_T_15; // @[tage.scala:248:52] wire [2:0] _s1_update_meta_T_8; // @[tage.scala:248:52] wire [2:0] _s1_update_meta_T_9; // @[tage.scala:248:52] wire [2:0] s1_update_old_ctr_0_0 = s1_update_meta_provider_ctr_0; // 
@[tage.scala:248:52, :256:31] wire [2:0] s1_update_old_ctr_1_0 = s1_update_meta_provider_ctr_0; // @[tage.scala:248:52, :256:31] wire [2:0] s1_update_old_ctr_2_0 = s1_update_meta_provider_ctr_0; // @[tage.scala:248:52, :256:31] wire [2:0] s1_update_old_ctr_3_0 = s1_update_meta_provider_ctr_0; // @[tage.scala:248:52, :256:31] wire [2:0] s1_update_old_ctr_4_0 = s1_update_meta_provider_ctr_0; // @[tage.scala:248:52, :256:31] wire [2:0] s1_update_old_ctr_5_0 = s1_update_meta_provider_ctr_0; // @[tage.scala:248:52, :256:31] wire [2:0] _s1_update_meta_T_10; // @[tage.scala:248:52] wire [2:0] s1_update_old_ctr_0_1 = s1_update_meta_provider_ctr_1; // @[tage.scala:248:52, :256:31] wire [2:0] s1_update_old_ctr_1_1 = s1_update_meta_provider_ctr_1; // @[tage.scala:248:52, :256:31] wire [2:0] s1_update_old_ctr_2_1 = s1_update_meta_provider_ctr_1; // @[tage.scala:248:52, :256:31] wire [2:0] s1_update_old_ctr_3_1 = s1_update_meta_provider_ctr_1; // @[tage.scala:248:52, :256:31] wire [2:0] s1_update_old_ctr_4_1 = s1_update_meta_provider_ctr_1; // @[tage.scala:248:52, :256:31] wire [2:0] s1_update_old_ctr_5_1 = s1_update_meta_provider_ctr_1; // @[tage.scala:248:52, :256:31] wire [2:0] _s1_update_meta_T_11; // @[tage.scala:248:52] wire [2:0] s1_update_old_ctr_0_2 = s1_update_meta_provider_ctr_2; // @[tage.scala:248:52, :256:31] wire [2:0] s1_update_old_ctr_1_2 = s1_update_meta_provider_ctr_2; // @[tage.scala:248:52, :256:31] wire [2:0] s1_update_old_ctr_2_2 = s1_update_meta_provider_ctr_2; // @[tage.scala:248:52, :256:31] wire [2:0] s1_update_old_ctr_3_2 = s1_update_meta_provider_ctr_2; // @[tage.scala:248:52, :256:31] wire [2:0] s1_update_old_ctr_4_2 = s1_update_meta_provider_ctr_2; // @[tage.scala:248:52, :256:31] wire [2:0] s1_update_old_ctr_5_2 = s1_update_meta_provider_ctr_2; // @[tage.scala:248:52, :256:31] wire _s1_update_meta_T_1; // @[tage.scala:248:52] wire [2:0] s1_update_old_ctr_0_3 = s1_update_meta_provider_ctr_3; // @[tage.scala:248:52, :256:31] wire [2:0] s1_update_old_ctr_1_3 = s1_update_meta_provider_ctr_3; // @[tage.scala:248:52, :256:31] wire [2:0] s1_update_old_ctr_2_3 = s1_update_meta_provider_ctr_3; // @[tage.scala:248:52, :256:31] wire [2:0] s1_update_old_ctr_3_3 = s1_update_meta_provider_ctr_3; // @[tage.scala:248:52, :256:31] wire [2:0] s1_update_old_ctr_4_3 = s1_update_meta_provider_ctr_3; // @[tage.scala:248:52, :256:31] wire [2:0] s1_update_old_ctr_5_3 = s1_update_meta_provider_ctr_3; // @[tage.scala:248:52, :256:31] wire [2:0] _s1_update_meta_T; // @[tage.scala:248:52] wire _s1_update_meta_T_3; // @[tage.scala:248:52] wire [2:0] _s1_update_meta_T_2; // @[tage.scala:248:52] wire _s1_update_meta_T_5; // @[tage.scala:248:52] wire [2:0] _s1_update_meta_T_4; // @[tage.scala:248:52] wire _s1_update_meta_T_7; // @[tage.scala:248:52] wire [2:0] _s1_update_meta_T_6; // @[tage.scala:248:52] wire s1_update_meta_provider_0_valid; // @[tage.scala:248:52] wire [2:0] s1_update_meta_provider_0_bits; // @[tage.scala:248:52] wire s1_update_meta_provider_1_valid; // @[tage.scala:248:52] wire [2:0] s1_update_meta_provider_1_bits; // @[tage.scala:248:52] wire s1_update_meta_provider_2_valid; // @[tage.scala:248:52] wire [2:0] s1_update_meta_provider_2_bits; // @[tage.scala:248:52] wire s1_update_meta_provider_3_valid; // @[tage.scala:248:52] wire [2:0] s1_update_meta_provider_3_bits; // @[tage.scala:248:52] wire s1_update_meta_alt_differs_0; // @[tage.scala:248:52] wire s1_update_meta_alt_differs_1; // @[tage.scala:248:52] wire s1_update_meta_alt_differs_2; // @[tage.scala:248:52] wire 
s1_update_meta_alt_differs_3; // @[tage.scala:248:52] wire [1:0] s1_update_meta_provider_u_0; // @[tage.scala:248:52] wire [1:0] s1_update_meta_provider_u_1; // @[tage.scala:248:52] wire [1:0] s1_update_meta_provider_u_2; // @[tage.scala:248:52] wire [1:0] s1_update_meta_provider_u_3; // @[tage.scala:248:52] wire s1_update_meta_allocate_0_valid; // @[tage.scala:248:52] wire [2:0] s1_update_meta_allocate_0_bits; // @[tage.scala:248:52] wire s1_update_meta_allocate_1_valid; // @[tage.scala:248:52] wire [2:0] s1_update_meta_allocate_1_bits; // @[tage.scala:248:52] wire s1_update_meta_allocate_2_valid; // @[tage.scala:248:52] wire [2:0] s1_update_meta_allocate_2_bits; // @[tage.scala:248:52] wire s1_update_meta_allocate_3_valid; // @[tage.scala:248:52] wire [2:0] s1_update_meta_allocate_3_bits; // @[tage.scala:248:52] wire [55:0] _s1_update_meta_WIRE = s1_update_bits_meta[55:0]; // @[tage.scala:248:52] assign _s1_update_meta_T = _s1_update_meta_WIRE[2:0]; // @[tage.scala:248:52] assign s1_update_meta_allocate_0_bits = _s1_update_meta_T; // @[tage.scala:248:52] assign _s1_update_meta_T_1 = _s1_update_meta_WIRE[3]; // @[tage.scala:248:52] assign s1_update_meta_allocate_0_valid = _s1_update_meta_T_1; // @[tage.scala:248:52] assign _s1_update_meta_T_2 = _s1_update_meta_WIRE[6:4]; // @[tage.scala:248:52] assign s1_update_meta_allocate_1_bits = _s1_update_meta_T_2; // @[tage.scala:248:52] assign _s1_update_meta_T_3 = _s1_update_meta_WIRE[7]; // @[tage.scala:248:52] assign s1_update_meta_allocate_1_valid = _s1_update_meta_T_3; // @[tage.scala:248:52] assign _s1_update_meta_T_4 = _s1_update_meta_WIRE[10:8]; // @[tage.scala:248:52] assign s1_update_meta_allocate_2_bits = _s1_update_meta_T_4; // @[tage.scala:248:52] assign _s1_update_meta_T_5 = _s1_update_meta_WIRE[11]; // @[tage.scala:248:52] assign s1_update_meta_allocate_2_valid = _s1_update_meta_T_5; // @[tage.scala:248:52] assign _s1_update_meta_T_6 = _s1_update_meta_WIRE[14:12]; // @[tage.scala:248:52] assign s1_update_meta_allocate_3_bits = _s1_update_meta_T_6; // @[tage.scala:248:52] assign _s1_update_meta_T_7 = _s1_update_meta_WIRE[15]; // @[tage.scala:248:52] assign s1_update_meta_allocate_3_valid = _s1_update_meta_T_7; // @[tage.scala:248:52] assign _s1_update_meta_T_8 = _s1_update_meta_WIRE[18:16]; // @[tage.scala:248:52] assign s1_update_meta_provider_ctr_0 = _s1_update_meta_T_8; // @[tage.scala:248:52] assign _s1_update_meta_T_9 = _s1_update_meta_WIRE[21:19]; // @[tage.scala:248:52] assign s1_update_meta_provider_ctr_1 = _s1_update_meta_T_9; // @[tage.scala:248:52] assign _s1_update_meta_T_10 = _s1_update_meta_WIRE[24:22]; // @[tage.scala:248:52] assign s1_update_meta_provider_ctr_2 = _s1_update_meta_T_10; // @[tage.scala:248:52] assign _s1_update_meta_T_11 = _s1_update_meta_WIRE[27:25]; // @[tage.scala:248:52] assign s1_update_meta_provider_ctr_3 = _s1_update_meta_T_11; // @[tage.scala:248:52] assign _s1_update_meta_T_12 = _s1_update_meta_WIRE[29:28]; // @[tage.scala:248:52] assign s1_update_meta_provider_u_0 = _s1_update_meta_T_12; // @[tage.scala:248:52] assign _s1_update_meta_T_13 = _s1_update_meta_WIRE[31:30]; // @[tage.scala:248:52] assign s1_update_meta_provider_u_1 = _s1_update_meta_T_13; // @[tage.scala:248:52] assign _s1_update_meta_T_14 = _s1_update_meta_WIRE[33:32]; // @[tage.scala:248:52] assign s1_update_meta_provider_u_2 = _s1_update_meta_T_14; // @[tage.scala:248:52] assign _s1_update_meta_T_15 = _s1_update_meta_WIRE[35:34]; // @[tage.scala:248:52] assign s1_update_meta_provider_u_3 = _s1_update_meta_T_15; // 
@[tage.scala:248:52] assign _s1_update_meta_T_16 = _s1_update_meta_WIRE[36]; // @[tage.scala:248:52] assign s1_update_meta_alt_differs_0 = _s1_update_meta_T_16; // @[tage.scala:248:52] assign _s1_update_meta_T_17 = _s1_update_meta_WIRE[37]; // @[tage.scala:248:52] assign s1_update_meta_alt_differs_1 = _s1_update_meta_T_17; // @[tage.scala:248:52] assign _s1_update_meta_T_18 = _s1_update_meta_WIRE[38]; // @[tage.scala:248:52] assign s1_update_meta_alt_differs_2 = _s1_update_meta_T_18; // @[tage.scala:248:52] assign _s1_update_meta_T_19 = _s1_update_meta_WIRE[39]; // @[tage.scala:248:52] assign s1_update_meta_alt_differs_3 = _s1_update_meta_T_19; // @[tage.scala:248:52] assign _s1_update_meta_T_20 = _s1_update_meta_WIRE[42:40]; // @[tage.scala:248:52] assign s1_update_meta_provider_0_bits = _s1_update_meta_T_20; // @[tage.scala:248:52] assign _s1_update_meta_T_21 = _s1_update_meta_WIRE[43]; // @[tage.scala:248:52] assign s1_update_meta_provider_0_valid = _s1_update_meta_T_21; // @[tage.scala:248:52] assign _s1_update_meta_T_22 = _s1_update_meta_WIRE[46:44]; // @[tage.scala:248:52] assign s1_update_meta_provider_1_bits = _s1_update_meta_T_22; // @[tage.scala:248:52] assign _s1_update_meta_T_23 = _s1_update_meta_WIRE[47]; // @[tage.scala:248:52] assign s1_update_meta_provider_1_valid = _s1_update_meta_T_23; // @[tage.scala:248:52] assign _s1_update_meta_T_24 = _s1_update_meta_WIRE[50:48]; // @[tage.scala:248:52] assign s1_update_meta_provider_2_bits = _s1_update_meta_T_24; // @[tage.scala:248:52] assign _s1_update_meta_T_25 = _s1_update_meta_WIRE[51]; // @[tage.scala:248:52] assign s1_update_meta_provider_2_valid = _s1_update_meta_T_25; // @[tage.scala:248:52] assign _s1_update_meta_T_26 = _s1_update_meta_WIRE[54:52]; // @[tage.scala:248:52] assign s1_update_meta_provider_3_bits = _s1_update_meta_T_26; // @[tage.scala:248:52] assign _s1_update_meta_T_27 = _s1_update_meta_WIRE[55]; // @[tage.scala:248:52] assign s1_update_meta_provider_3_valid = _s1_update_meta_T_27; // @[tage.scala:248:52] wire [3:0] _s1_update_mispredict_mask_T = 4'h1 << s1_update_bits_cfi_idx_bits; // @[OneHot.scala:58:35] wire [3:0] _s1_update_mispredict_mask_T_1 = {4{s1_update_bits_cfi_mispredicted}}; // @[tage.scala:250:9] wire [3:0] s1_update_mispredict_mask = _s1_update_mispredict_mask_T & _s1_update_mispredict_mask_T_1; // @[OneHot.scala:58:35] wire s1_update_mask_0_0; // @[tage.scala:252:33] wire s1_update_mask_0_1; // @[tage.scala:252:33] wire s1_update_mask_0_2; // @[tage.scala:252:33] wire s1_update_mask_0_3; // @[tage.scala:252:33] wire s1_update_mask_1_0; // @[tage.scala:252:33] wire s1_update_mask_1_1; // @[tage.scala:252:33] wire s1_update_mask_1_2; // @[tage.scala:252:33] wire s1_update_mask_1_3; // @[tage.scala:252:33] wire s1_update_mask_2_0; // @[tage.scala:252:33] wire s1_update_mask_2_1; // @[tage.scala:252:33] wire s1_update_mask_2_2; // @[tage.scala:252:33] wire s1_update_mask_2_3; // @[tage.scala:252:33] wire s1_update_mask_3_0; // @[tage.scala:252:33] wire s1_update_mask_3_1; // @[tage.scala:252:33] wire s1_update_mask_3_2; // @[tage.scala:252:33] wire s1_update_mask_3_3; // @[tage.scala:252:33] wire s1_update_mask_4_0; // @[tage.scala:252:33] wire s1_update_mask_4_1; // @[tage.scala:252:33] wire s1_update_mask_4_2; // @[tage.scala:252:33] wire s1_update_mask_4_3; // @[tage.scala:252:33] wire s1_update_mask_5_0; // @[tage.scala:252:33] wire s1_update_mask_5_1; // @[tage.scala:252:33] wire s1_update_mask_5_2; // @[tage.scala:252:33] wire s1_update_mask_5_3; // @[tage.scala:252:33] wire 
s1_update_u_mask_0_0; // @[tage.scala:253:35] wire s1_update_u_mask_0_1; // @[tage.scala:253:35] wire s1_update_u_mask_0_2; // @[tage.scala:253:35] wire s1_update_u_mask_0_3; // @[tage.scala:253:35] wire s1_update_u_mask_1_0; // @[tage.scala:253:35] wire s1_update_u_mask_1_1; // @[tage.scala:253:35] wire s1_update_u_mask_1_2; // @[tage.scala:253:35] wire s1_update_u_mask_1_3; // @[tage.scala:253:35] wire s1_update_u_mask_2_0; // @[tage.scala:253:35] wire s1_update_u_mask_2_1; // @[tage.scala:253:35] wire s1_update_u_mask_2_2; // @[tage.scala:253:35] wire s1_update_u_mask_2_3; // @[tage.scala:253:35] wire s1_update_u_mask_3_0; // @[tage.scala:253:35] wire s1_update_u_mask_3_1; // @[tage.scala:253:35] wire s1_update_u_mask_3_2; // @[tage.scala:253:35] wire s1_update_u_mask_3_3; // @[tage.scala:253:35] wire s1_update_u_mask_4_0; // @[tage.scala:253:35] wire s1_update_u_mask_4_1; // @[tage.scala:253:35] wire s1_update_u_mask_4_2; // @[tage.scala:253:35] wire s1_update_u_mask_4_3; // @[tage.scala:253:35] wire s1_update_u_mask_5_0; // @[tage.scala:253:35] wire s1_update_u_mask_5_1; // @[tage.scala:253:35] wire s1_update_u_mask_5_2; // @[tage.scala:253:35] wire s1_update_u_mask_5_3; // @[tage.scala:253:35] wire s1_update_taken_0_0; // @[tage.scala:255:31] wire s1_update_taken_0_1; // @[tage.scala:255:31] wire s1_update_taken_0_2; // @[tage.scala:255:31] wire s1_update_taken_0_3; // @[tage.scala:255:31] wire s1_update_taken_1_0; // @[tage.scala:255:31] wire s1_update_taken_1_1; // @[tage.scala:255:31] wire s1_update_taken_1_2; // @[tage.scala:255:31] wire s1_update_taken_1_3; // @[tage.scala:255:31] wire s1_update_taken_2_0; // @[tage.scala:255:31] wire s1_update_taken_2_1; // @[tage.scala:255:31] wire s1_update_taken_2_2; // @[tage.scala:255:31] wire s1_update_taken_2_3; // @[tage.scala:255:31] wire s1_update_taken_3_0; // @[tage.scala:255:31] wire s1_update_taken_3_1; // @[tage.scala:255:31] wire s1_update_taken_3_2; // @[tage.scala:255:31] wire s1_update_taken_3_3; // @[tage.scala:255:31] wire s1_update_taken_4_0; // @[tage.scala:255:31] wire s1_update_taken_4_1; // @[tage.scala:255:31] wire s1_update_taken_4_2; // @[tage.scala:255:31] wire s1_update_taken_4_3; // @[tage.scala:255:31] wire s1_update_taken_5_0; // @[tage.scala:255:31] wire s1_update_taken_5_1; // @[tage.scala:255:31] wire s1_update_taken_5_2; // @[tage.scala:255:31] wire s1_update_taken_5_3; // @[tage.scala:255:31] wire s1_update_alloc_0_0; // @[tage.scala:257:31] wire s1_update_alloc_0_1; // @[tage.scala:257:31] wire s1_update_alloc_0_2; // @[tage.scala:257:31] wire s1_update_alloc_0_3; // @[tage.scala:257:31] wire s1_update_alloc_1_0; // @[tage.scala:257:31] wire s1_update_alloc_1_1; // @[tage.scala:257:31] wire s1_update_alloc_1_2; // @[tage.scala:257:31] wire s1_update_alloc_1_3; // @[tage.scala:257:31] wire s1_update_alloc_2_0; // @[tage.scala:257:31] wire s1_update_alloc_2_1; // @[tage.scala:257:31] wire s1_update_alloc_2_2; // @[tage.scala:257:31] wire s1_update_alloc_2_3; // @[tage.scala:257:31] wire s1_update_alloc_3_0; // @[tage.scala:257:31] wire s1_update_alloc_3_1; // @[tage.scala:257:31] wire s1_update_alloc_3_2; // @[tage.scala:257:31] wire s1_update_alloc_3_3; // @[tage.scala:257:31] wire s1_update_alloc_4_0; // @[tage.scala:257:31] wire s1_update_alloc_4_1; // @[tage.scala:257:31] wire s1_update_alloc_4_2; // @[tage.scala:257:31] wire s1_update_alloc_4_3; // @[tage.scala:257:31] wire s1_update_alloc_5_0; // @[tage.scala:257:31] wire s1_update_alloc_5_1; // @[tage.scala:257:31] wire s1_update_alloc_5_2; // 
@[tage.scala:257:31] wire s1_update_alloc_5_3; // @[tage.scala:257:31] wire [1:0] s1_update_u_0_0; // @[tage.scala:258:31] wire [1:0] s1_update_u_0_1; // @[tage.scala:258:31] wire [1:0] s1_update_u_0_2; // @[tage.scala:258:31] wire [1:0] s1_update_u_0_3; // @[tage.scala:258:31] wire [1:0] s1_update_u_1_0; // @[tage.scala:258:31] wire [1:0] s1_update_u_1_1; // @[tage.scala:258:31] wire [1:0] s1_update_u_1_2; // @[tage.scala:258:31] wire [1:0] s1_update_u_1_3; // @[tage.scala:258:31] wire [1:0] s1_update_u_2_0; // @[tage.scala:258:31] wire [1:0] s1_update_u_2_1; // @[tage.scala:258:31] wire [1:0] s1_update_u_2_2; // @[tage.scala:258:31] wire [1:0] s1_update_u_2_3; // @[tage.scala:258:31] wire [1:0] s1_update_u_3_0; // @[tage.scala:258:31] wire [1:0] s1_update_u_3_1; // @[tage.scala:258:31] wire [1:0] s1_update_u_3_2; // @[tage.scala:258:31] wire [1:0] s1_update_u_3_3; // @[tage.scala:258:31] wire [1:0] s1_update_u_4_0; // @[tage.scala:258:31] wire [1:0] s1_update_u_4_1; // @[tage.scala:258:31] wire [1:0] s1_update_u_4_2; // @[tage.scala:258:31] wire [1:0] s1_update_u_4_3; // @[tage.scala:258:31] wire [1:0] s1_update_u_5_0; // @[tage.scala:258:31] wire [1:0] s1_update_u_5_1; // @[tage.scala:258:31] wire [1:0] s1_update_u_5_2; // @[tage.scala:258:31] wire [1:0] s1_update_u_5_3; // @[tage.scala:258:31] reg s3_provided; // @[tage.scala:279:30] assign f3_meta_provider_0_valid = s3_provided; // @[tage.scala:223:21, :279:30] reg [2:0] s3_provider; // @[tage.scala:280:30] assign f3_meta_provider_0_bits = s3_provider; // @[tage.scala:223:21, :280:30] reg s3_alt_provided; // @[tage.scala:281:34] reg [2:0] s3_alt_provider; // @[tage.scala:282:34] reg [2:0] prov_ctr; // @[tage.scala:284:23] assign f3_meta_provider_ctr_0 = prov_ctr; // @[tage.scala:223:21, :284:23] reg [1:0] prov_u; // @[tage.scala:284:23] assign f3_meta_provider_u_0 = prov_u; // @[tage.scala:223:21, :284:23] reg [2:0] alt_ctr; // @[tage.scala:285:23] reg [1:0] alt_u; // @[tage.scala:285:23] wire _io_resp_f3_0_taken_T = prov_ctr == 3'h3; // @[tage.scala:284:23, :288:20] wire _io_resp_f3_0_taken_T_1 = prov_ctr == 3'h4; // @[tage.scala:284:23, :288:40] wire _io_resp_f3_0_taken_T_2 = _io_resp_f3_0_taken_T | _io_resp_f3_0_taken_T_1; // @[tage.scala:288:{20,28,40}] wire _io_resp_f3_0_taken_T_3 = alt_ctr[2]; // @[tage.scala:285:23, :289:37] wire _f3_meta_alt_differs_0_T = alt_ctr[2]; // @[tage.scala:285:23, :289:37, :295:60] wire _io_resp_f3_0_taken_T_4 = s3_alt_provided ? _io_resp_f3_0_taken_T_3 : io_resp_in_0_f3_0_taken_0; // @[tage.scala:209:7, :281:34, :289:{12,37}] wire _io_resp_f3_0_taken_T_5 = prov_ctr[2]; // @[tage.scala:284:23, :290:17] wire _io_resp_f3_0_taken_T_6 = _io_resp_f3_0_taken_T_2 ? _io_resp_f3_0_taken_T_4 : _io_resp_f3_0_taken_T_5; // @[tage.scala:288:{10,28}, :289:12, :290:17] assign _io_resp_f3_0_taken_T_7 = s3_provided ? 
_io_resp_f3_0_taken_T_6 : io_resp_in_0_f3_0_taken_0; // @[tage.scala:209:7, :279:30, :287:31, :288:10] assign io_resp_f3_0_taken_0 = _io_resp_f3_0_taken_T_7; // @[tage.scala:209:7, :287:31] wire _f3_meta_alt_differs_0_T_1 = _f3_meta_alt_differs_0_T != io_resp_f3_0_taken_0; // @[tage.scala:209:7, :295:{60,64}] assign _f3_meta_alt_differs_0_T_2 = s3_alt_provided & _f3_meta_alt_differs_0_T_1; // @[tage.scala:281:34, :295:{50,64}] assign f3_meta_alt_differs_0 = _f3_meta_alt_differs_0_T_2; // @[tage.scala:223:21, :295:50] wire _allocatable_slots_T = ~f3_resps_0_0_valid; // @[tage.scala:246:25, :302:33] wire _allocatable_slots_T_1 = f3_resps_0_0_bits_u == 2'h0; // @[tage.scala:246:25, :302:60] wire _allocatable_slots_T_2 = _allocatable_slots_T & _allocatable_slots_T_1; // @[tage.scala:302:{33,45,60}] wire _allocatable_slots_WIRE_0 = _allocatable_slots_T_2; // @[tage.scala:302:{14,45}] wire _allocatable_slots_T_3 = ~f3_resps_1_0_valid; // @[tage.scala:246:25, :302:33] wire _allocatable_slots_T_4 = f3_resps_1_0_bits_u == 2'h0; // @[tage.scala:246:25, :302:60] wire _allocatable_slots_T_5 = _allocatable_slots_T_3 & _allocatable_slots_T_4; // @[tage.scala:302:{33,45,60}] wire _allocatable_slots_WIRE_1 = _allocatable_slots_T_5; // @[tage.scala:302:{14,45}] wire _allocatable_slots_T_6 = ~f3_resps_2_0_valid; // @[tage.scala:246:25, :302:33] wire _allocatable_slots_T_7 = f3_resps_2_0_bits_u == 2'h0; // @[tage.scala:246:25, :302:60] wire _allocatable_slots_T_8 = _allocatable_slots_T_6 & _allocatable_slots_T_7; // @[tage.scala:302:{33,45,60}] wire _allocatable_slots_WIRE_2 = _allocatable_slots_T_8; // @[tage.scala:302:{14,45}] wire _allocatable_slots_T_9 = ~f3_resps_3_0_valid; // @[tage.scala:246:25, :302:33] wire _allocatable_slots_T_10 = f3_resps_3_0_bits_u == 2'h0; // @[tage.scala:246:25, :302:60] wire _allocatable_slots_T_11 = _allocatable_slots_T_9 & _allocatable_slots_T_10; // @[tage.scala:302:{33,45,60}] wire _allocatable_slots_WIRE_3 = _allocatable_slots_T_11; // @[tage.scala:302:{14,45}] wire _allocatable_slots_T_12 = ~f3_resps_4_0_valid; // @[tage.scala:246:25, :302:33] wire _allocatable_slots_T_13 = f3_resps_4_0_bits_u == 2'h0; // @[tage.scala:246:25, :302:60] wire _allocatable_slots_T_14 = _allocatable_slots_T_12 & _allocatable_slots_T_13; // @[tage.scala:302:{33,45,60}] wire _allocatable_slots_WIRE_4 = _allocatable_slots_T_14; // @[tage.scala:302:{14,45}] wire _allocatable_slots_T_15 = ~f3_resps_5_0_valid; // @[tage.scala:246:25, :302:33] wire _allocatable_slots_T_16 = f3_resps_5_0_bits_u == 2'h0; // @[tage.scala:246:25, :302:60] wire _allocatable_slots_T_17 = _allocatable_slots_T_15 & _allocatable_slots_T_16; // @[tage.scala:302:{33,45,60}] wire _allocatable_slots_WIRE_5 = _allocatable_slots_T_17; // @[tage.scala:302:{14,45}] wire [1:0] allocatable_slots_lo_hi = {_allocatable_slots_WIRE_2, _allocatable_slots_WIRE_1}; // @[tage.scala:302:{14,70}] wire [2:0] allocatable_slots_lo = {allocatable_slots_lo_hi, _allocatable_slots_WIRE_0}; // @[tage.scala:302:{14,70}] wire [1:0] allocatable_slots_hi_hi = {_allocatable_slots_WIRE_5, _allocatable_slots_WIRE_4}; // @[tage.scala:302:{14,70}] wire [2:0] allocatable_slots_hi = {allocatable_slots_hi_hi, _allocatable_slots_WIRE_3}; // @[tage.scala:302:{14,70}] wire [5:0] _allocatable_slots_T_18 = {allocatable_slots_hi, allocatable_slots_lo}; // @[tage.scala:302:70] wire [7:0] _allocatable_slots_T_19 = 8'h1 << f3_meta_provider_0_bits; // @[OneHot.scala:58:35] wire [7:0] _allocatable_slots_T_20 = _allocatable_slots_T_19; // @[OneHot.scala:58:35] wire [7:0] 
_allocatable_slots_T_21 = {1'h0, _allocatable_slots_T_19[7:1]}; // @[OneHot.scala:58:35] wire [7:0] _allocatable_slots_T_22 = {2'h0, _allocatable_slots_T_19[7:2]}; // @[OneHot.scala:58:35] wire [7:0] _allocatable_slots_T_23 = {3'h0, _allocatable_slots_T_19[7:3]}; // @[OneHot.scala:58:35] wire [7:0] _allocatable_slots_T_24 = {4'h0, _allocatable_slots_T_19[7:4]}; // @[OneHot.scala:58:35] wire [7:0] _allocatable_slots_T_25 = {5'h0, _allocatable_slots_T_19[7:5]}; // @[OneHot.scala:58:35] wire [7:0] _allocatable_slots_T_26 = {6'h0, _allocatable_slots_T_19[7:6]}; // @[OneHot.scala:58:35] wire [7:0] _allocatable_slots_T_27 = {7'h0, _allocatable_slots_T_19[7]}; // @[OneHot.scala:58:35] wire [7:0] _allocatable_slots_T_28 = _allocatable_slots_T_20 | _allocatable_slots_T_21; // @[util.scala:383:{29,45}] wire [7:0] _allocatable_slots_T_29 = _allocatable_slots_T_28 | _allocatable_slots_T_22; // @[util.scala:383:{29,45}] wire [7:0] _allocatable_slots_T_30 = _allocatable_slots_T_29 | _allocatable_slots_T_23; // @[util.scala:383:{29,45}] wire [7:0] _allocatable_slots_T_31 = _allocatable_slots_T_30 | _allocatable_slots_T_24; // @[util.scala:383:{29,45}] wire [7:0] _allocatable_slots_T_32 = _allocatable_slots_T_31 | _allocatable_slots_T_25; // @[util.scala:383:{29,45}] wire [7:0] _allocatable_slots_T_33 = _allocatable_slots_T_32 | _allocatable_slots_T_26; // @[util.scala:383:{29,45}] wire [7:0] _allocatable_slots_T_34 = _allocatable_slots_T_33 | _allocatable_slots_T_27; // @[util.scala:383:{29,45}] wire [5:0] _allocatable_slots_T_35 = {6{f3_meta_provider_0_valid}}; // @[tage.scala:223:21, :303:61] wire [7:0] _allocatable_slots_T_36 = {2'h0, _allocatable_slots_T_34[5:0] & _allocatable_slots_T_35}; // @[util.scala:383:45] wire [7:0] _allocatable_slots_T_37 = ~_allocatable_slots_T_36; // @[tage.scala:303:{7,55}] wire [7:0] allocatable_slots = {2'h0, _allocatable_slots_T_37[5:0] & _allocatable_slots_T_18}; // @[tage.scala:302:{70,77}, :303:7] wire [1:0] alloc_lfsr_lo_hi = {_alloc_lfsr_prng_io_out_2, _alloc_lfsr_prng_io_out_1}; // @[PRNG.scala:91:22, :95:17] wire [2:0] alloc_lfsr_lo = {alloc_lfsr_lo_hi, _alloc_lfsr_prng_io_out_0}; // @[PRNG.scala:91:22, :95:17] wire [1:0] alloc_lfsr_hi_hi = {_alloc_lfsr_prng_io_out_5, _alloc_lfsr_prng_io_out_4}; // @[PRNG.scala:91:22, :95:17] wire [2:0] alloc_lfsr_hi = {alloc_lfsr_hi_hi, _alloc_lfsr_prng_io_out_3}; // @[PRNG.scala:91:22, :95:17] wire [5:0] alloc_lfsr = {alloc_lfsr_hi, alloc_lfsr_lo}; // @[PRNG.scala:95:17] wire _first_entry_T = allocatable_slots[0]; // @[OneHot.scala:48:45] wire _first_entry_T_1 = allocatable_slots[1]; // @[OneHot.scala:48:45] wire _first_entry_T_2 = allocatable_slots[2]; // @[OneHot.scala:48:45] wire _first_entry_T_3 = allocatable_slots[3]; // @[OneHot.scala:48:45] wire _first_entry_T_4 = allocatable_slots[4]; // @[OneHot.scala:48:45] wire _first_entry_T_5 = allocatable_slots[5]; // @[OneHot.scala:48:45] wire _first_entry_T_6 = allocatable_slots[6]; // @[OneHot.scala:48:45] wire _first_entry_T_7 = allocatable_slots[7]; // @[OneHot.scala:48:45] wire [2:0] _first_entry_T_8 = {2'h3, ~_first_entry_T_6}; // @[OneHot.scala:48:45] wire [2:0] _first_entry_T_9 = _first_entry_T_5 ? 3'h5 : _first_entry_T_8; // @[OneHot.scala:48:45] wire [2:0] _first_entry_T_10 = _first_entry_T_4 ? 3'h4 : _first_entry_T_9; // @[OneHot.scala:48:45] wire [2:0] _first_entry_T_11 = _first_entry_T_3 ? 3'h3 : _first_entry_T_10; // @[OneHot.scala:48:45] wire [2:0] _first_entry_T_12 = _first_entry_T_2 ? 
3'h2 : _first_entry_T_11; // @[OneHot.scala:48:45] wire [2:0] _first_entry_T_13 = _first_entry_T_1 ? 3'h1 : _first_entry_T_12; // @[OneHot.scala:48:45] wire [2:0] first_entry = _first_entry_T ? 3'h0 : _first_entry_T_13; // @[OneHot.scala:48:45] wire [7:0] _masked_entry_T = {2'h0, allocatable_slots[5:0] & alloc_lfsr}; // @[PRNG.scala:95:17] wire _masked_entry_T_1 = _masked_entry_T[0]; // @[OneHot.scala:48:45] wire _masked_entry_T_2 = _masked_entry_T[1]; // @[OneHot.scala:48:45] wire _masked_entry_T_3 = _masked_entry_T[2]; // @[OneHot.scala:48:45] wire _masked_entry_T_4 = _masked_entry_T[3]; // @[OneHot.scala:48:45] wire _masked_entry_T_5 = _masked_entry_T[4]; // @[OneHot.scala:48:45] wire _masked_entry_T_6 = _masked_entry_T[5]; // @[OneHot.scala:48:45] wire _masked_entry_T_7 = _masked_entry_T[6]; // @[OneHot.scala:48:45] wire _masked_entry_T_8 = _masked_entry_T[7]; // @[OneHot.scala:48:45] wire [2:0] _masked_entry_T_9 = {2'h3, ~_masked_entry_T_7}; // @[OneHot.scala:48:45] wire [2:0] _masked_entry_T_10 = _masked_entry_T_6 ? 3'h5 : _masked_entry_T_9; // @[OneHot.scala:48:45] wire [2:0] _masked_entry_T_11 = _masked_entry_T_5 ? 3'h4 : _masked_entry_T_10; // @[OneHot.scala:48:45] wire [2:0] _masked_entry_T_12 = _masked_entry_T_4 ? 3'h3 : _masked_entry_T_11; // @[OneHot.scala:48:45] wire [2:0] _masked_entry_T_13 = _masked_entry_T_3 ? 3'h2 : _masked_entry_T_12; // @[OneHot.scala:48:45] wire [2:0] _masked_entry_T_14 = _masked_entry_T_2 ? 3'h1 : _masked_entry_T_13; // @[OneHot.scala:48:45] wire [2:0] masked_entry = _masked_entry_T_1 ? 3'h0 : _masked_entry_T_14; // @[OneHot.scala:48:45] wire [7:0] _alloc_entry_T = allocatable_slots >> masked_entry; // @[Mux.scala:50:70] wire _alloc_entry_T_1 = _alloc_entry_T[0]; // @[tage.scala:309:44] assign alloc_entry = _alloc_entry_T_1 ? 
masked_entry : first_entry; // @[Mux.scala:50:70] assign f3_meta_allocate_0_bits = alloc_entry; // @[tage.scala:223:21, :309:26] assign _f3_meta_allocate_0_valid_T = |allocatable_slots; // @[tage.scala:302:77, :313:52] assign f3_meta_allocate_0_valid = _f3_meta_allocate_0_valid_T; // @[tage.scala:223:21, :313:52] wire _update_was_taken_T = s1_update_bits_cfi_idx_bits == 2'h0; // @[tage.scala:317:58] wire _update_was_taken_T_1 = s1_update_bits_cfi_idx_valid & _update_was_taken_T; // @[tage.scala:316:58, :317:58] wire update_was_taken = _update_was_taken_T_1 & s1_update_bits_cfi_taken; // @[tage.scala:316:58, :317:67] wire [4:0] _GEN_5 = {s1_update_bits_is_mispredict_update | s1_update_bits_is_repair_update, s1_update_bits_btb_mispredicts}; // @[predictor.scala:94:50, :96:{49,69}, :185:30] wire _T_50 = s1_update_bits_br_mask[0] & s1_update_valid & _GEN_5 == 5'h0; // @[OneHot.scala:58:35] wire _GEN_6 = _T_50 & s1_update_meta_provider_0_valid & s1_update_meta_provider_0_bits == 3'h0; // @[tage.scala:248:52, :252:33, :319:{37,56,92}, :320:47, :323:37] wire _GEN_7 = _T_50 & s1_update_meta_provider_0_valid & s1_update_meta_provider_0_bits == 3'h1; // @[tage.scala:229:43, :248:52, :252:33, :319:{37,56,92}, :320:47, :323:37] wire _GEN_8 = _T_50 & s1_update_meta_provider_0_valid & s1_update_meta_provider_0_bits == 3'h2; // @[tage.scala:248:52, :252:33, :319:{37,56,92}, :320:47, :323:37] wire _GEN_9 = _T_50 & s1_update_meta_provider_0_valid & s1_update_meta_provider_0_bits == 3'h3; // @[tage.scala:248:52, :252:33, :288:20, :319:{37,56,92}, :320:47, :323:37] wire _GEN_10 = _T_50 & s1_update_meta_provider_0_valid & s1_update_meta_provider_0_bits == 3'h4; // @[tage.scala:248:52, :252:33, :319:{37,56,92}, :320:47, :323:37] wire _GEN_11 = _T_50 & s1_update_meta_provider_0_valid & s1_update_meta_provider_0_bits == 3'h5; // @[tage.scala:248:52, :252:33, :319:{37,56,92}, :320:47, :323:37] wire _new_u_T = s1_update_mispredict_mask[0]; // @[tage.scala:249:73, :328:52] wire _new_u_T_1 = ~s1_update_meta_alt_differs_0; // @[tage.scala:228:9, :248:52] wire _new_u_T_2 = s1_update_meta_provider_u_0 == 2'h0; // @[tage.scala:229:27, :248:52] wire [2:0] _GEN_12 = {1'h0, s1_update_meta_provider_u_0}; // @[tage.scala:229:43, :248:52] wire [2:0] _new_u_T_3 = _GEN_12 - 3'h1; // @[tage.scala:229:43] wire [1:0] _new_u_T_4 = _new_u_T_3[1:0]; // @[tage.scala:229:43] wire [1:0] _new_u_T_5 = _new_u_T_2 ? 2'h0 : _new_u_T_4; // @[tage.scala:229:{24,27,43}] wire _new_u_T_6 = &s1_update_meta_provider_u_0; // @[tage.scala:230:27, :248:52] wire [2:0] _new_u_T_7 = _GEN_12 + 3'h1; // @[tage.scala:229:43, :230:43] wire [1:0] _new_u_T_8 = _new_u_T_7[1:0]; // @[tage.scala:230:43] wire [1:0] _new_u_T_9 = _new_u_T_6 ? 2'h3 : _new_u_T_8; // @[tage.scala:230:{24,27,43}] wire [1:0] _new_u_T_10 = _new_u_T ? _new_u_T_5 : _new_u_T_9; // @[tage.scala:229:{8,24}, :230:24, :328:52] wire [1:0] new_u = _new_u_T_1 ? 
s1_update_meta_provider_u_0 : _new_u_T_10; // @[tage.scala:228:{8,9}, :229:8, :248:52] reg s3_provided_1; // @[tage.scala:279:30] assign f3_meta_provider_1_valid = s3_provided_1; // @[tage.scala:223:21, :279:30] reg [2:0] s3_provider_1; // @[tage.scala:280:30] assign f3_meta_provider_1_bits = s3_provider_1; // @[tage.scala:223:21, :280:30] reg s3_alt_provided_1; // @[tage.scala:281:34] reg [2:0] s3_alt_provider_1; // @[tage.scala:282:34] reg [2:0] prov_1_ctr; // @[tage.scala:284:23] assign f3_meta_provider_ctr_1 = prov_1_ctr; // @[tage.scala:223:21, :284:23] reg [1:0] prov_1_u; // @[tage.scala:284:23] assign f3_meta_provider_u_1 = prov_1_u; // @[tage.scala:223:21, :284:23] reg [2:0] alt_1_ctr; // @[tage.scala:285:23] reg [1:0] alt_1_u; // @[tage.scala:285:23] wire _io_resp_f3_1_taken_T = prov_1_ctr == 3'h3; // @[tage.scala:284:23, :288:20] wire _io_resp_f3_1_taken_T_1 = prov_1_ctr == 3'h4; // @[tage.scala:284:23, :288:40] wire _io_resp_f3_1_taken_T_2 = _io_resp_f3_1_taken_T | _io_resp_f3_1_taken_T_1; // @[tage.scala:288:{20,28,40}] wire _io_resp_f3_1_taken_T_3 = alt_1_ctr[2]; // @[tage.scala:285:23, :289:37] wire _f3_meta_alt_differs_1_T = alt_1_ctr[2]; // @[tage.scala:285:23, :289:37, :295:60] wire _io_resp_f3_1_taken_T_4 = s3_alt_provided_1 ? _io_resp_f3_1_taken_T_3 : io_resp_in_0_f3_1_taken_0; // @[tage.scala:209:7, :281:34, :289:{12,37}] wire _io_resp_f3_1_taken_T_5 = prov_1_ctr[2]; // @[tage.scala:284:23, :290:17] wire _io_resp_f3_1_taken_T_6 = _io_resp_f3_1_taken_T_2 ? _io_resp_f3_1_taken_T_4 : _io_resp_f3_1_taken_T_5; // @[tage.scala:288:{10,28}, :289:12, :290:17] assign _io_resp_f3_1_taken_T_7 = s3_provided_1 ? _io_resp_f3_1_taken_T_6 : io_resp_in_0_f3_1_taken_0; // @[tage.scala:209:7, :279:30, :287:31, :288:10] assign io_resp_f3_1_taken_0 = _io_resp_f3_1_taken_T_7; // @[tage.scala:209:7, :287:31] wire _f3_meta_alt_differs_1_T_1 = _f3_meta_alt_differs_1_T != io_resp_f3_1_taken_0; // @[tage.scala:209:7, :295:{60,64}] assign _f3_meta_alt_differs_1_T_2 = s3_alt_provided_1 & _f3_meta_alt_differs_1_T_1; // @[tage.scala:281:34, :295:{50,64}] assign f3_meta_alt_differs_1 = _f3_meta_alt_differs_1_T_2; // @[tage.scala:223:21, :295:50] wire _allocatable_slots_T_38 = ~f3_resps_0_1_valid; // @[tage.scala:246:25, :302:33] wire _allocatable_slots_T_39 = f3_resps_0_1_bits_u == 2'h0; // @[tage.scala:246:25, :302:60] wire _allocatable_slots_T_40 = _allocatable_slots_T_38 & _allocatable_slots_T_39; // @[tage.scala:302:{33,45,60}] wire _allocatable_slots_WIRE_1_0 = _allocatable_slots_T_40; // @[tage.scala:302:{14,45}] wire _allocatable_slots_T_41 = ~f3_resps_1_1_valid; // @[tage.scala:246:25, :302:33] wire _allocatable_slots_T_42 = f3_resps_1_1_bits_u == 2'h0; // @[tage.scala:246:25, :302:60] wire _allocatable_slots_T_43 = _allocatable_slots_T_41 & _allocatable_slots_T_42; // @[tage.scala:302:{33,45,60}] wire _allocatable_slots_WIRE_1_1 = _allocatable_slots_T_43; // @[tage.scala:302:{14,45}] wire _allocatable_slots_T_44 = ~f3_resps_2_1_valid; // @[tage.scala:246:25, :302:33] wire _allocatable_slots_T_45 = f3_resps_2_1_bits_u == 2'h0; // @[tage.scala:246:25, :302:60] wire _allocatable_slots_T_46 = _allocatable_slots_T_44 & _allocatable_slots_T_45; // @[tage.scala:302:{33,45,60}] wire _allocatable_slots_WIRE_1_2 = _allocatable_slots_T_46; // @[tage.scala:302:{14,45}] wire _allocatable_slots_T_47 = ~f3_resps_3_1_valid; // @[tage.scala:246:25, :302:33] wire _allocatable_slots_T_48 = f3_resps_3_1_bits_u == 2'h0; // @[tage.scala:246:25, :302:60] wire _allocatable_slots_T_49 = _allocatable_slots_T_47 & 
_allocatable_slots_T_48; // @[tage.scala:302:{33,45,60}] wire _allocatable_slots_WIRE_1_3 = _allocatable_slots_T_49; // @[tage.scala:302:{14,45}] wire _allocatable_slots_T_50 = ~f3_resps_4_1_valid; // @[tage.scala:246:25, :302:33] wire _allocatable_slots_T_51 = f3_resps_4_1_bits_u == 2'h0; // @[tage.scala:246:25, :302:60] wire _allocatable_slots_T_52 = _allocatable_slots_T_50 & _allocatable_slots_T_51; // @[tage.scala:302:{33,45,60}] wire _allocatable_slots_WIRE_1_4 = _allocatable_slots_T_52; // @[tage.scala:302:{14,45}] wire _allocatable_slots_T_53 = ~f3_resps_5_1_valid; // @[tage.scala:246:25, :302:33] wire _allocatable_slots_T_54 = f3_resps_5_1_bits_u == 2'h0; // @[tage.scala:246:25, :302:60] wire _allocatable_slots_T_55 = _allocatable_slots_T_53 & _allocatable_slots_T_54; // @[tage.scala:302:{33,45,60}] wire _allocatable_slots_WIRE_1_5 = _allocatable_slots_T_55; // @[tage.scala:302:{14,45}] wire [1:0] allocatable_slots_lo_hi_1 = {_allocatable_slots_WIRE_1_2, _allocatable_slots_WIRE_1_1}; // @[tage.scala:302:{14,70}] wire [2:0] allocatable_slots_lo_1 = {allocatable_slots_lo_hi_1, _allocatable_slots_WIRE_1_0}; // @[tage.scala:302:{14,70}] wire [1:0] allocatable_slots_hi_hi_1 = {_allocatable_slots_WIRE_1_5, _allocatable_slots_WIRE_1_4}; // @[tage.scala:302:{14,70}] wire [2:0] allocatable_slots_hi_1 = {allocatable_slots_hi_hi_1, _allocatable_slots_WIRE_1_3}; // @[tage.scala:302:{14,70}] wire [5:0] _allocatable_slots_T_56 = {allocatable_slots_hi_1, allocatable_slots_lo_1}; // @[tage.scala:302:70] wire [7:0] _allocatable_slots_T_57 = 8'h1 << f3_meta_provider_1_bits; // @[OneHot.scala:58:35] wire [7:0] _allocatable_slots_T_58 = _allocatable_slots_T_57; // @[OneHot.scala:58:35] wire [7:0] _allocatable_slots_T_59 = {1'h0, _allocatable_slots_T_57[7:1]}; // @[OneHot.scala:58:35] wire [7:0] _allocatable_slots_T_60 = {2'h0, _allocatable_slots_T_57[7:2]}; // @[OneHot.scala:58:35] wire [7:0] _allocatable_slots_T_61 = {3'h0, _allocatable_slots_T_57[7:3]}; // @[OneHot.scala:58:35] wire [7:0] _allocatable_slots_T_62 = {4'h0, _allocatable_slots_T_57[7:4]}; // @[OneHot.scala:58:35] wire [7:0] _allocatable_slots_T_63 = {5'h0, _allocatable_slots_T_57[7:5]}; // @[OneHot.scala:58:35] wire [7:0] _allocatable_slots_T_64 = {6'h0, _allocatable_slots_T_57[7:6]}; // @[OneHot.scala:58:35] wire [7:0] _allocatable_slots_T_65 = {7'h0, _allocatable_slots_T_57[7]}; // @[OneHot.scala:58:35] wire [7:0] _allocatable_slots_T_66 = _allocatable_slots_T_58 | _allocatable_slots_T_59; // @[util.scala:383:{29,45}] wire [7:0] _allocatable_slots_T_67 = _allocatable_slots_T_66 | _allocatable_slots_T_60; // @[util.scala:383:{29,45}] wire [7:0] _allocatable_slots_T_68 = _allocatable_slots_T_67 | _allocatable_slots_T_61; // @[util.scala:383:{29,45}] wire [7:0] _allocatable_slots_T_69 = _allocatable_slots_T_68 | _allocatable_slots_T_62; // @[util.scala:383:{29,45}] wire [7:0] _allocatable_slots_T_70 = _allocatable_slots_T_69 | _allocatable_slots_T_63; // @[util.scala:383:{29,45}] wire [7:0] _allocatable_slots_T_71 = _allocatable_slots_T_70 | _allocatable_slots_T_64; // @[util.scala:383:{29,45}] wire [7:0] _allocatable_slots_T_72 = _allocatable_slots_T_71 | _allocatable_slots_T_65; // @[util.scala:383:{29,45}] wire [5:0] _allocatable_slots_T_73 = {6{f3_meta_provider_1_valid}}; // @[tage.scala:223:21, :303:61] wire [7:0] _allocatable_slots_T_74 = {2'h0, _allocatable_slots_T_72[5:0] & _allocatable_slots_T_73}; // @[util.scala:383:45] wire [7:0] _allocatable_slots_T_75 = ~_allocatable_slots_T_74; // @[tage.scala:303:{7,55}] wire [7:0] 
allocatable_slots_1 = {2'h0, _allocatable_slots_T_75[5:0] & _allocatable_slots_T_56}; // @[tage.scala:302:{70,77}, :303:7] wire [1:0] alloc_lfsr_lo_hi_1 = {_alloc_lfsr_prng_1_io_out_2, _alloc_lfsr_prng_1_io_out_1}; // @[PRNG.scala:91:22, :95:17] wire [2:0] alloc_lfsr_lo_1 = {alloc_lfsr_lo_hi_1, _alloc_lfsr_prng_1_io_out_0}; // @[PRNG.scala:91:22, :95:17] wire [1:0] alloc_lfsr_hi_hi_1 = {_alloc_lfsr_prng_1_io_out_5, _alloc_lfsr_prng_1_io_out_4}; // @[PRNG.scala:91:22, :95:17] wire [2:0] alloc_lfsr_hi_1 = {alloc_lfsr_hi_hi_1, _alloc_lfsr_prng_1_io_out_3}; // @[PRNG.scala:91:22, :95:17] wire [5:0] alloc_lfsr_1 = {alloc_lfsr_hi_1, alloc_lfsr_lo_1}; // @[PRNG.scala:95:17] wire _first_entry_T_14 = allocatable_slots_1[0]; // @[OneHot.scala:48:45] wire _first_entry_T_15 = allocatable_slots_1[1]; // @[OneHot.scala:48:45] wire _first_entry_T_16 = allocatable_slots_1[2]; // @[OneHot.scala:48:45] wire _first_entry_T_17 = allocatable_slots_1[3]; // @[OneHot.scala:48:45] wire _first_entry_T_18 = allocatable_slots_1[4]; // @[OneHot.scala:48:45] wire _first_entry_T_19 = allocatable_slots_1[5]; // @[OneHot.scala:48:45] wire _first_entry_T_20 = allocatable_slots_1[6]; // @[OneHot.scala:48:45] wire _first_entry_T_21 = allocatable_slots_1[7]; // @[OneHot.scala:48:45] wire [2:0] _first_entry_T_22 = {2'h3, ~_first_entry_T_20}; // @[OneHot.scala:48:45] wire [2:0] _first_entry_T_23 = _first_entry_T_19 ? 3'h5 : _first_entry_T_22; // @[OneHot.scala:48:45] wire [2:0] _first_entry_T_24 = _first_entry_T_18 ? 3'h4 : _first_entry_T_23; // @[OneHot.scala:48:45] wire [2:0] _first_entry_T_25 = _first_entry_T_17 ? 3'h3 : _first_entry_T_24; // @[OneHot.scala:48:45] wire [2:0] _first_entry_T_26 = _first_entry_T_16 ? 3'h2 : _first_entry_T_25; // @[OneHot.scala:48:45] wire [2:0] _first_entry_T_27 = _first_entry_T_15 ? 3'h1 : _first_entry_T_26; // @[OneHot.scala:48:45] wire [2:0] first_entry_1 = _first_entry_T_14 ? 3'h0 : _first_entry_T_27; // @[OneHot.scala:48:45] wire [7:0] _masked_entry_T_15 = {2'h0, allocatable_slots_1[5:0] & alloc_lfsr_1}; // @[PRNG.scala:95:17] wire _masked_entry_T_16 = _masked_entry_T_15[0]; // @[OneHot.scala:48:45] wire _masked_entry_T_17 = _masked_entry_T_15[1]; // @[OneHot.scala:48:45] wire _masked_entry_T_18 = _masked_entry_T_15[2]; // @[OneHot.scala:48:45] wire _masked_entry_T_19 = _masked_entry_T_15[3]; // @[OneHot.scala:48:45] wire _masked_entry_T_20 = _masked_entry_T_15[4]; // @[OneHot.scala:48:45] wire _masked_entry_T_21 = _masked_entry_T_15[5]; // @[OneHot.scala:48:45] wire _masked_entry_T_22 = _masked_entry_T_15[6]; // @[OneHot.scala:48:45] wire _masked_entry_T_23 = _masked_entry_T_15[7]; // @[OneHot.scala:48:45] wire [2:0] _masked_entry_T_24 = {2'h3, ~_masked_entry_T_22}; // @[OneHot.scala:48:45] wire [2:0] _masked_entry_T_25 = _masked_entry_T_21 ? 3'h5 : _masked_entry_T_24; // @[OneHot.scala:48:45] wire [2:0] _masked_entry_T_26 = _masked_entry_T_20 ? 3'h4 : _masked_entry_T_25; // @[OneHot.scala:48:45] wire [2:0] _masked_entry_T_27 = _masked_entry_T_19 ? 3'h3 : _masked_entry_T_26; // @[OneHot.scala:48:45] wire [2:0] _masked_entry_T_28 = _masked_entry_T_18 ? 3'h2 : _masked_entry_T_27; // @[OneHot.scala:48:45] wire [2:0] _masked_entry_T_29 = _masked_entry_T_17 ? 3'h1 : _masked_entry_T_28; // @[OneHot.scala:48:45] wire [2:0] masked_entry_1 = _masked_entry_T_16 ? 
3'h0 : _masked_entry_T_29; // @[OneHot.scala:48:45] wire [7:0] _alloc_entry_T_2 = allocatable_slots_1 >> masked_entry_1; // @[Mux.scala:50:70] wire _alloc_entry_T_3 = _alloc_entry_T_2[0]; // @[tage.scala:309:44] assign alloc_entry_1 = _alloc_entry_T_3 ? masked_entry_1 : first_entry_1; // @[Mux.scala:50:70] assign f3_meta_allocate_1_bits = alloc_entry_1; // @[tage.scala:223:21, :309:26] assign _f3_meta_allocate_1_valid_T = |allocatable_slots_1; // @[tage.scala:302:77, :313:52] assign f3_meta_allocate_1_valid = _f3_meta_allocate_1_valid_T; // @[tage.scala:223:21, :313:52] wire _update_was_taken_T_2 = s1_update_bits_cfi_idx_bits == 2'h1; // @[tage.scala:317:58] wire _update_was_taken_T_3 = s1_update_bits_cfi_idx_valid & _update_was_taken_T_2; // @[tage.scala:316:58, :317:58] wire update_was_taken_1 = _update_was_taken_T_3 & s1_update_bits_cfi_taken; // @[tage.scala:316:58, :317:67] wire _T_87 = s1_update_bits_br_mask[1] & s1_update_valid & _GEN_5 == 5'h0; // @[OneHot.scala:58:35] wire _GEN_13 = _T_87 & s1_update_meta_provider_1_valid & s1_update_meta_provider_1_bits == 3'h0; // @[tage.scala:248:52, :252:33, :319:{37,56,92}, :320:47, :323:37] wire _GEN_14 = _T_87 & s1_update_meta_provider_1_valid & s1_update_meta_provider_1_bits == 3'h1; // @[tage.scala:229:43, :248:52, :252:33, :319:{37,56,92}, :320:47, :323:37] wire _GEN_15 = _T_87 & s1_update_meta_provider_1_valid & s1_update_meta_provider_1_bits == 3'h2; // @[tage.scala:248:52, :252:33, :319:{37,56,92}, :320:47, :323:37] wire _GEN_16 = _T_87 & s1_update_meta_provider_1_valid & s1_update_meta_provider_1_bits == 3'h3; // @[tage.scala:248:52, :252:33, :288:20, :319:{37,56,92}, :320:47, :323:37] wire _GEN_17 = _T_87 & s1_update_meta_provider_1_valid & s1_update_meta_provider_1_bits == 3'h4; // @[tage.scala:248:52, :252:33, :319:{37,56,92}, :320:47, :323:37] wire _GEN_18 = _T_87 & s1_update_meta_provider_1_valid & s1_update_meta_provider_1_bits == 3'h5; // @[tage.scala:248:52, :252:33, :319:{37,56,92}, :320:47, :323:37] wire _new_u_T_11 = s1_update_mispredict_mask[1]; // @[tage.scala:249:73, :328:52] wire _new_u_T_12 = ~s1_update_meta_alt_differs_1; // @[tage.scala:228:9, :248:52] wire _new_u_T_13 = s1_update_meta_provider_u_1 == 2'h0; // @[tage.scala:229:27, :248:52] wire [2:0] _GEN_19 = {1'h0, s1_update_meta_provider_u_1}; // @[tage.scala:229:43, :248:52] wire [2:0] _new_u_T_14 = _GEN_19 - 3'h1; // @[tage.scala:229:43] wire [1:0] _new_u_T_15 = _new_u_T_14[1:0]; // @[tage.scala:229:43] wire [1:0] _new_u_T_16 = _new_u_T_13 ? 2'h0 : _new_u_T_15; // @[tage.scala:229:{24,27,43}] wire _new_u_T_17 = &s1_update_meta_provider_u_1; // @[tage.scala:230:27, :248:52] wire [2:0] _new_u_T_18 = _GEN_19 + 3'h1; // @[tage.scala:229:43, :230:43] wire [1:0] _new_u_T_19 = _new_u_T_18[1:0]; // @[tage.scala:230:43] wire [1:0] _new_u_T_20 = _new_u_T_17 ? 2'h3 : _new_u_T_19; // @[tage.scala:230:{24,27,43}] wire [1:0] _new_u_T_21 = _new_u_T_11 ? _new_u_T_16 : _new_u_T_20; // @[tage.scala:229:{8,24}, :230:24, :328:52] wire [1:0] new_u_1 = _new_u_T_12 ? 
s1_update_meta_provider_u_1 : _new_u_T_21; // @[tage.scala:228:{8,9}, :229:8, :248:52] reg s3_provided_2; // @[tage.scala:279:30] assign f3_meta_provider_2_valid = s3_provided_2; // @[tage.scala:223:21, :279:30] reg [2:0] s3_provider_2; // @[tage.scala:280:30] assign f3_meta_provider_2_bits = s3_provider_2; // @[tage.scala:223:21, :280:30] reg s3_alt_provided_2; // @[tage.scala:281:34] reg [2:0] s3_alt_provider_2; // @[tage.scala:282:34] reg [2:0] prov_2_ctr; // @[tage.scala:284:23] assign f3_meta_provider_ctr_2 = prov_2_ctr; // @[tage.scala:223:21, :284:23] reg [1:0] prov_2_u; // @[tage.scala:284:23] assign f3_meta_provider_u_2 = prov_2_u; // @[tage.scala:223:21, :284:23] reg [2:0] alt_2_ctr; // @[tage.scala:285:23] reg [1:0] alt_2_u; // @[tage.scala:285:23] wire _io_resp_f3_2_taken_T = prov_2_ctr == 3'h3; // @[tage.scala:284:23, :288:20] wire _io_resp_f3_2_taken_T_1 = prov_2_ctr == 3'h4; // @[tage.scala:284:23, :288:40] wire _io_resp_f3_2_taken_T_2 = _io_resp_f3_2_taken_T | _io_resp_f3_2_taken_T_1; // @[tage.scala:288:{20,28,40}] wire _io_resp_f3_2_taken_T_3 = alt_2_ctr[2]; // @[tage.scala:285:23, :289:37] wire _f3_meta_alt_differs_2_T = alt_2_ctr[2]; // @[tage.scala:285:23, :289:37, :295:60] wire _io_resp_f3_2_taken_T_4 = s3_alt_provided_2 ? _io_resp_f3_2_taken_T_3 : io_resp_in_0_f3_2_taken_0; // @[tage.scala:209:7, :281:34, :289:{12,37}] wire _io_resp_f3_2_taken_T_5 = prov_2_ctr[2]; // @[tage.scala:284:23, :290:17] wire _io_resp_f3_2_taken_T_6 = _io_resp_f3_2_taken_T_2 ? _io_resp_f3_2_taken_T_4 : _io_resp_f3_2_taken_T_5; // @[tage.scala:288:{10,28}, :289:12, :290:17] assign _io_resp_f3_2_taken_T_7 = s3_provided_2 ? _io_resp_f3_2_taken_T_6 : io_resp_in_0_f3_2_taken_0; // @[tage.scala:209:7, :279:30, :287:31, :288:10] assign io_resp_f3_2_taken_0 = _io_resp_f3_2_taken_T_7; // @[tage.scala:209:7, :287:31] wire _f3_meta_alt_differs_2_T_1 = _f3_meta_alt_differs_2_T != io_resp_f3_2_taken_0; // @[tage.scala:209:7, :295:{60,64}] assign _f3_meta_alt_differs_2_T_2 = s3_alt_provided_2 & _f3_meta_alt_differs_2_T_1; // @[tage.scala:281:34, :295:{50,64}] assign f3_meta_alt_differs_2 = _f3_meta_alt_differs_2_T_2; // @[tage.scala:223:21, :295:50] wire _allocatable_slots_T_76 = ~f3_resps_0_2_valid; // @[tage.scala:246:25, :302:33] wire _allocatable_slots_T_77 = f3_resps_0_2_bits_u == 2'h0; // @[tage.scala:246:25, :302:60] wire _allocatable_slots_T_78 = _allocatable_slots_T_76 & _allocatable_slots_T_77; // @[tage.scala:302:{33,45,60}] wire _allocatable_slots_WIRE_2_0 = _allocatable_slots_T_78; // @[tage.scala:302:{14,45}] wire _allocatable_slots_T_79 = ~f3_resps_1_2_valid; // @[tage.scala:246:25, :302:33] wire _allocatable_slots_T_80 = f3_resps_1_2_bits_u == 2'h0; // @[tage.scala:246:25, :302:60] wire _allocatable_slots_T_81 = _allocatable_slots_T_79 & _allocatable_slots_T_80; // @[tage.scala:302:{33,45,60}] wire _allocatable_slots_WIRE_2_1 = _allocatable_slots_T_81; // @[tage.scala:302:{14,45}] wire _allocatable_slots_T_82 = ~f3_resps_2_2_valid; // @[tage.scala:246:25, :302:33] wire _allocatable_slots_T_83 = f3_resps_2_2_bits_u == 2'h0; // @[tage.scala:246:25, :302:60] wire _allocatable_slots_T_84 = _allocatable_slots_T_82 & _allocatable_slots_T_83; // @[tage.scala:302:{33,45,60}] wire _allocatable_slots_WIRE_2_2 = _allocatable_slots_T_84; // @[tage.scala:302:{14,45}] wire _allocatable_slots_T_85 = ~f3_resps_3_2_valid; // @[tage.scala:246:25, :302:33] wire _allocatable_slots_T_86 = f3_resps_3_2_bits_u == 2'h0; // @[tage.scala:246:25, :302:60] wire _allocatable_slots_T_87 = _allocatable_slots_T_85 & 
_allocatable_slots_T_86; // @[tage.scala:302:{33,45,60}] wire _allocatable_slots_WIRE_2_3 = _allocatable_slots_T_87; // @[tage.scala:302:{14,45}] wire _allocatable_slots_T_88 = ~f3_resps_4_2_valid; // @[tage.scala:246:25, :302:33] wire _allocatable_slots_T_89 = f3_resps_4_2_bits_u == 2'h0; // @[tage.scala:246:25, :302:60] wire _allocatable_slots_T_90 = _allocatable_slots_T_88 & _allocatable_slots_T_89; // @[tage.scala:302:{33,45,60}] wire _allocatable_slots_WIRE_2_4 = _allocatable_slots_T_90; // @[tage.scala:302:{14,45}] wire _allocatable_slots_T_91 = ~f3_resps_5_2_valid; // @[tage.scala:246:25, :302:33] wire _allocatable_slots_T_92 = f3_resps_5_2_bits_u == 2'h0; // @[tage.scala:246:25, :302:60] wire _allocatable_slots_T_93 = _allocatable_slots_T_91 & _allocatable_slots_T_92; // @[tage.scala:302:{33,45,60}] wire _allocatable_slots_WIRE_2_5 = _allocatable_slots_T_93; // @[tage.scala:302:{14,45}] wire [1:0] allocatable_slots_lo_hi_2 = {_allocatable_slots_WIRE_2_2, _allocatable_slots_WIRE_2_1}; // @[tage.scala:302:{14,70}] wire [2:0] allocatable_slots_lo_2 = {allocatable_slots_lo_hi_2, _allocatable_slots_WIRE_2_0}; // @[tage.scala:302:{14,70}] wire [1:0] allocatable_slots_hi_hi_2 = {_allocatable_slots_WIRE_2_5, _allocatable_slots_WIRE_2_4}; // @[tage.scala:302:{14,70}] wire [2:0] allocatable_slots_hi_2 = {allocatable_slots_hi_hi_2, _allocatable_slots_WIRE_2_3}; // @[tage.scala:302:{14,70}] wire [5:0] _allocatable_slots_T_94 = {allocatable_slots_hi_2, allocatable_slots_lo_2}; // @[tage.scala:302:70] wire [7:0] _allocatable_slots_T_95 = 8'h1 << f3_meta_provider_2_bits; // @[OneHot.scala:58:35] wire [7:0] _allocatable_slots_T_96 = _allocatable_slots_T_95; // @[OneHot.scala:58:35] wire [7:0] _allocatable_slots_T_97 = {1'h0, _allocatable_slots_T_95[7:1]}; // @[OneHot.scala:58:35] wire [7:0] _allocatable_slots_T_98 = {2'h0, _allocatable_slots_T_95[7:2]}; // @[OneHot.scala:58:35] wire [7:0] _allocatable_slots_T_99 = {3'h0, _allocatable_slots_T_95[7:3]}; // @[OneHot.scala:58:35] wire [7:0] _allocatable_slots_T_100 = {4'h0, _allocatable_slots_T_95[7:4]}; // @[OneHot.scala:58:35] wire [7:0] _allocatable_slots_T_101 = {5'h0, _allocatable_slots_T_95[7:5]}; // @[OneHot.scala:58:35] wire [7:0] _allocatable_slots_T_102 = {6'h0, _allocatable_slots_T_95[7:6]}; // @[OneHot.scala:58:35] wire [7:0] _allocatable_slots_T_103 = {7'h0, _allocatable_slots_T_95[7]}; // @[OneHot.scala:58:35] wire [7:0] _allocatable_slots_T_104 = _allocatable_slots_T_96 | _allocatable_slots_T_97; // @[util.scala:383:{29,45}] wire [7:0] _allocatable_slots_T_105 = _allocatable_slots_T_104 | _allocatable_slots_T_98; // @[util.scala:383:{29,45}] wire [7:0] _allocatable_slots_T_106 = _allocatable_slots_T_105 | _allocatable_slots_T_99; // @[util.scala:383:{29,45}] wire [7:0] _allocatable_slots_T_107 = _allocatable_slots_T_106 | _allocatable_slots_T_100; // @[util.scala:383:{29,45}] wire [7:0] _allocatable_slots_T_108 = _allocatable_slots_T_107 | _allocatable_slots_T_101; // @[util.scala:383:{29,45}] wire [7:0] _allocatable_slots_T_109 = _allocatable_slots_T_108 | _allocatable_slots_T_102; // @[util.scala:383:{29,45}] wire [7:0] _allocatable_slots_T_110 = _allocatable_slots_T_109 | _allocatable_slots_T_103; // @[util.scala:383:{29,45}] wire [5:0] _allocatable_slots_T_111 = {6{f3_meta_provider_2_valid}}; // @[tage.scala:223:21, :303:61] wire [7:0] _allocatable_slots_T_112 = {2'h0, _allocatable_slots_T_110[5:0] & _allocatable_slots_T_111}; // @[util.scala:383:45] wire [7:0] _allocatable_slots_T_113 = ~_allocatable_slots_T_112; // 
@[tage.scala:303:{7,55}] wire [7:0] allocatable_slots_2 = {2'h0, _allocatable_slots_T_113[5:0] & _allocatable_slots_T_94}; // @[tage.scala:302:{70,77}, :303:7] wire [1:0] alloc_lfsr_lo_hi_2 = {_alloc_lfsr_prng_2_io_out_2, _alloc_lfsr_prng_2_io_out_1}; // @[PRNG.scala:91:22, :95:17] wire [2:0] alloc_lfsr_lo_2 = {alloc_lfsr_lo_hi_2, _alloc_lfsr_prng_2_io_out_0}; // @[PRNG.scala:91:22, :95:17] wire [1:0] alloc_lfsr_hi_hi_2 = {_alloc_lfsr_prng_2_io_out_5, _alloc_lfsr_prng_2_io_out_4}; // @[PRNG.scala:91:22, :95:17] wire [2:0] alloc_lfsr_hi_2 = {alloc_lfsr_hi_hi_2, _alloc_lfsr_prng_2_io_out_3}; // @[PRNG.scala:91:22, :95:17] wire [5:0] alloc_lfsr_2 = {alloc_lfsr_hi_2, alloc_lfsr_lo_2}; // @[PRNG.scala:95:17] wire _first_entry_T_28 = allocatable_slots_2[0]; // @[OneHot.scala:48:45] wire _first_entry_T_29 = allocatable_slots_2[1]; // @[OneHot.scala:48:45] wire _first_entry_T_30 = allocatable_slots_2[2]; // @[OneHot.scala:48:45] wire _first_entry_T_31 = allocatable_slots_2[3]; // @[OneHot.scala:48:45] wire _first_entry_T_32 = allocatable_slots_2[4]; // @[OneHot.scala:48:45] wire _first_entry_T_33 = allocatable_slots_2[5]; // @[OneHot.scala:48:45] wire _first_entry_T_34 = allocatable_slots_2[6]; // @[OneHot.scala:48:45] wire _first_entry_T_35 = allocatable_slots_2[7]; // @[OneHot.scala:48:45] wire [2:0] _first_entry_T_36 = {2'h3, ~_first_entry_T_34}; // @[OneHot.scala:48:45] wire [2:0] _first_entry_T_37 = _first_entry_T_33 ? 3'h5 : _first_entry_T_36; // @[OneHot.scala:48:45] wire [2:0] _first_entry_T_38 = _first_entry_T_32 ? 3'h4 : _first_entry_T_37; // @[OneHot.scala:48:45] wire [2:0] _first_entry_T_39 = _first_entry_T_31 ? 3'h3 : _first_entry_T_38; // @[OneHot.scala:48:45] wire [2:0] _first_entry_T_40 = _first_entry_T_30 ? 3'h2 : _first_entry_T_39; // @[OneHot.scala:48:45] wire [2:0] _first_entry_T_41 = _first_entry_T_29 ? 3'h1 : _first_entry_T_40; // @[OneHot.scala:48:45] wire [2:0] first_entry_2 = _first_entry_T_28 ? 3'h0 : _first_entry_T_41; // @[OneHot.scala:48:45] wire [7:0] _masked_entry_T_30 = {2'h0, allocatable_slots_2[5:0] & alloc_lfsr_2}; // @[PRNG.scala:95:17] wire _masked_entry_T_31 = _masked_entry_T_30[0]; // @[OneHot.scala:48:45] wire _masked_entry_T_32 = _masked_entry_T_30[1]; // @[OneHot.scala:48:45] wire _masked_entry_T_33 = _masked_entry_T_30[2]; // @[OneHot.scala:48:45] wire _masked_entry_T_34 = _masked_entry_T_30[3]; // @[OneHot.scala:48:45] wire _masked_entry_T_35 = _masked_entry_T_30[4]; // @[OneHot.scala:48:45] wire _masked_entry_T_36 = _masked_entry_T_30[5]; // @[OneHot.scala:48:45] wire _masked_entry_T_37 = _masked_entry_T_30[6]; // @[OneHot.scala:48:45] wire _masked_entry_T_38 = _masked_entry_T_30[7]; // @[OneHot.scala:48:45] wire [2:0] _masked_entry_T_39 = {2'h3, ~_masked_entry_T_37}; // @[OneHot.scala:48:45] wire [2:0] _masked_entry_T_40 = _masked_entry_T_36 ? 3'h5 : _masked_entry_T_39; // @[OneHot.scala:48:45] wire [2:0] _masked_entry_T_41 = _masked_entry_T_35 ? 3'h4 : _masked_entry_T_40; // @[OneHot.scala:48:45] wire [2:0] _masked_entry_T_42 = _masked_entry_T_34 ? 3'h3 : _masked_entry_T_41; // @[OneHot.scala:48:45] wire [2:0] _masked_entry_T_43 = _masked_entry_T_33 ? 3'h2 : _masked_entry_T_42; // @[OneHot.scala:48:45] wire [2:0] _masked_entry_T_44 = _masked_entry_T_32 ? 3'h1 : _masked_entry_T_43; // @[OneHot.scala:48:45] wire [2:0] masked_entry_2 = _masked_entry_T_31 ? 
3'h0 : _masked_entry_T_44; // @[OneHot.scala:48:45] wire [7:0] _alloc_entry_T_4 = allocatable_slots_2 >> masked_entry_2; // @[Mux.scala:50:70] wire _alloc_entry_T_5 = _alloc_entry_T_4[0]; // @[tage.scala:309:44] assign alloc_entry_2 = _alloc_entry_T_5 ? masked_entry_2 : first_entry_2; // @[Mux.scala:50:70] assign f3_meta_allocate_2_bits = alloc_entry_2; // @[tage.scala:223:21, :309:26] assign _f3_meta_allocate_2_valid_T = |allocatable_slots_2; // @[tage.scala:302:77, :313:52] assign f3_meta_allocate_2_valid = _f3_meta_allocate_2_valid_T; // @[tage.scala:223:21, :313:52] wire _update_was_taken_T_4 = s1_update_bits_cfi_idx_bits == 2'h2; // @[tage.scala:317:58] wire _update_was_taken_T_5 = s1_update_bits_cfi_idx_valid & _update_was_taken_T_4; // @[tage.scala:316:58, :317:58] wire update_was_taken_2 = _update_was_taken_T_5 & s1_update_bits_cfi_taken; // @[tage.scala:316:58, :317:67] wire _T_124 = s1_update_bits_br_mask[2] & s1_update_valid & _GEN_5 == 5'h0; // @[OneHot.scala:58:35] wire _GEN_20 = _T_124 & s1_update_meta_provider_2_valid & s1_update_meta_provider_2_bits == 3'h0; // @[tage.scala:248:52, :252:33, :319:{37,56,92}, :320:47, :323:37] wire _GEN_21 = _T_124 & s1_update_meta_provider_2_valid & s1_update_meta_provider_2_bits == 3'h1; // @[tage.scala:229:43, :248:52, :252:33, :319:{37,56,92}, :320:47, :323:37] wire _GEN_22 = _T_124 & s1_update_meta_provider_2_valid & s1_update_meta_provider_2_bits == 3'h2; // @[tage.scala:248:52, :252:33, :319:{37,56,92}, :320:47, :323:37] wire _GEN_23 = _T_124 & s1_update_meta_provider_2_valid & s1_update_meta_provider_2_bits == 3'h3; // @[tage.scala:248:52, :252:33, :288:20, :319:{37,56,92}, :320:47, :323:37] wire _GEN_24 = _T_124 & s1_update_meta_provider_2_valid & s1_update_meta_provider_2_bits == 3'h4; // @[tage.scala:248:52, :252:33, :319:{37,56,92}, :320:47, :323:37] wire _GEN_25 = _T_124 & s1_update_meta_provider_2_valid & s1_update_meta_provider_2_bits == 3'h5; // @[tage.scala:248:52, :252:33, :319:{37,56,92}, :320:47, :323:37] wire _new_u_T_22 = s1_update_mispredict_mask[2]; // @[tage.scala:249:73, :328:52] wire _new_u_T_23 = ~s1_update_meta_alt_differs_2; // @[tage.scala:228:9, :248:52] wire _new_u_T_24 = s1_update_meta_provider_u_2 == 2'h0; // @[tage.scala:229:27, :248:52] wire [2:0] _GEN_26 = {1'h0, s1_update_meta_provider_u_2}; // @[tage.scala:229:43, :248:52] wire [2:0] _new_u_T_25 = _GEN_26 - 3'h1; // @[tage.scala:229:43] wire [1:0] _new_u_T_26 = _new_u_T_25[1:0]; // @[tage.scala:229:43] wire [1:0] _new_u_T_27 = _new_u_T_24 ? 2'h0 : _new_u_T_26; // @[tage.scala:229:{24,27,43}] wire _new_u_T_28 = &s1_update_meta_provider_u_2; // @[tage.scala:230:27, :248:52] wire [2:0] _new_u_T_29 = _GEN_26 + 3'h1; // @[tage.scala:229:43, :230:43] wire [1:0] _new_u_T_30 = _new_u_T_29[1:0]; // @[tage.scala:230:43] wire [1:0] _new_u_T_31 = _new_u_T_28 ? 2'h3 : _new_u_T_30; // @[tage.scala:230:{24,27,43}] wire [1:0] _new_u_T_32 = _new_u_T_22 ? _new_u_T_27 : _new_u_T_31; // @[tage.scala:229:{8,24}, :230:24, :328:52] wire [1:0] new_u_2 = _new_u_T_23 ? 
s1_update_meta_provider_u_2 : _new_u_T_32; // @[tage.scala:228:{8,9}, :229:8, :248:52] reg s3_provided_3; // @[tage.scala:279:30] assign f3_meta_provider_3_valid = s3_provided_3; // @[tage.scala:223:21, :279:30] reg [2:0] s3_provider_3; // @[tage.scala:280:30] assign f3_meta_provider_3_bits = s3_provider_3; // @[tage.scala:223:21, :280:30] reg s3_alt_provided_3; // @[tage.scala:281:34] reg [2:0] s3_alt_provider_3; // @[tage.scala:282:34] reg [2:0] prov_3_ctr; // @[tage.scala:284:23] assign f3_meta_provider_ctr_3 = prov_3_ctr; // @[tage.scala:223:21, :284:23] reg [1:0] prov_3_u; // @[tage.scala:284:23] assign f3_meta_provider_u_3 = prov_3_u; // @[tage.scala:223:21, :284:23] reg [2:0] alt_3_ctr; // @[tage.scala:285:23] reg [1:0] alt_3_u; // @[tage.scala:285:23] wire _io_resp_f3_3_taken_T = prov_3_ctr == 3'h3; // @[tage.scala:284:23, :288:20] wire _io_resp_f3_3_taken_T_1 = prov_3_ctr == 3'h4; // @[tage.scala:284:23, :288:40] wire _io_resp_f3_3_taken_T_2 = _io_resp_f3_3_taken_T | _io_resp_f3_3_taken_T_1; // @[tage.scala:288:{20,28,40}] wire _io_resp_f3_3_taken_T_3 = alt_3_ctr[2]; // @[tage.scala:285:23, :289:37] wire _f3_meta_alt_differs_3_T = alt_3_ctr[2]; // @[tage.scala:285:23, :289:37, :295:60] wire _io_resp_f3_3_taken_T_4 = s3_alt_provided_3 ? _io_resp_f3_3_taken_T_3 : io_resp_in_0_f3_3_taken_0; // @[tage.scala:209:7, :281:34, :289:{12,37}] wire _io_resp_f3_3_taken_T_5 = prov_3_ctr[2]; // @[tage.scala:284:23, :290:17] wire _io_resp_f3_3_taken_T_6 = _io_resp_f3_3_taken_T_2 ? _io_resp_f3_3_taken_T_4 : _io_resp_f3_3_taken_T_5; // @[tage.scala:288:{10,28}, :289:12, :290:17] assign _io_resp_f3_3_taken_T_7 = s3_provided_3 ? _io_resp_f3_3_taken_T_6 : io_resp_in_0_f3_3_taken_0; // @[tage.scala:209:7, :279:30, :287:31, :288:10] assign io_resp_f3_3_taken_0 = _io_resp_f3_3_taken_T_7; // @[tage.scala:209:7, :287:31] wire _f3_meta_alt_differs_3_T_1 = _f3_meta_alt_differs_3_T != io_resp_f3_3_taken_0; // @[tage.scala:209:7, :295:{60,64}] assign _f3_meta_alt_differs_3_T_2 = s3_alt_provided_3 & _f3_meta_alt_differs_3_T_1; // @[tage.scala:281:34, :295:{50,64}] assign f3_meta_alt_differs_3 = _f3_meta_alt_differs_3_T_2; // @[tage.scala:223:21, :295:50] wire _allocatable_slots_T_114 = ~f3_resps_0_3_valid; // @[tage.scala:246:25, :302:33] wire _allocatable_slots_T_115 = f3_resps_0_3_bits_u == 2'h0; // @[tage.scala:246:25, :302:60] wire _allocatable_slots_T_116 = _allocatable_slots_T_114 & _allocatable_slots_T_115; // @[tage.scala:302:{33,45,60}] wire _allocatable_slots_WIRE_3_0 = _allocatable_slots_T_116; // @[tage.scala:302:{14,45}] wire _allocatable_slots_T_117 = ~f3_resps_1_3_valid; // @[tage.scala:246:25, :302:33] wire _allocatable_slots_T_118 = f3_resps_1_3_bits_u == 2'h0; // @[tage.scala:246:25, :302:60] wire _allocatable_slots_T_119 = _allocatable_slots_T_117 & _allocatable_slots_T_118; // @[tage.scala:302:{33,45,60}] wire _allocatable_slots_WIRE_3_1 = _allocatable_slots_T_119; // @[tage.scala:302:{14,45}] wire _allocatable_slots_T_120 = ~f3_resps_2_3_valid; // @[tage.scala:246:25, :302:33] wire _allocatable_slots_T_121 = f3_resps_2_3_bits_u == 2'h0; // @[tage.scala:246:25, :302:60] wire _allocatable_slots_T_122 = _allocatable_slots_T_120 & _allocatable_slots_T_121; // @[tage.scala:302:{33,45,60}] wire _allocatable_slots_WIRE_3_2 = _allocatable_slots_T_122; // @[tage.scala:302:{14,45}] wire _allocatable_slots_T_123 = ~f3_resps_3_3_valid; // @[tage.scala:246:25, :302:33] wire _allocatable_slots_T_124 = f3_resps_3_3_bits_u == 2'h0; // @[tage.scala:246:25, :302:60] wire _allocatable_slots_T_125 = 
_allocatable_slots_T_123 & _allocatable_slots_T_124; // @[tage.scala:302:{33,45,60}] wire _allocatable_slots_WIRE_3_3 = _allocatable_slots_T_125; // @[tage.scala:302:{14,45}] wire _allocatable_slots_T_126 = ~f3_resps_4_3_valid; // @[tage.scala:246:25, :302:33] wire _allocatable_slots_T_127 = f3_resps_4_3_bits_u == 2'h0; // @[tage.scala:246:25, :302:60] wire _allocatable_slots_T_128 = _allocatable_slots_T_126 & _allocatable_slots_T_127; // @[tage.scala:302:{33,45,60}] wire _allocatable_slots_WIRE_3_4 = _allocatable_slots_T_128; // @[tage.scala:302:{14,45}] wire _allocatable_slots_T_129 = ~f3_resps_5_3_valid; // @[tage.scala:246:25, :302:33] wire _allocatable_slots_T_130 = f3_resps_5_3_bits_u == 2'h0; // @[tage.scala:246:25, :302:60] wire _allocatable_slots_T_131 = _allocatable_slots_T_129 & _allocatable_slots_T_130; // @[tage.scala:302:{33,45,60}] wire _allocatable_slots_WIRE_3_5 = _allocatable_slots_T_131; // @[tage.scala:302:{14,45}] wire [1:0] allocatable_slots_lo_hi_3 = {_allocatable_slots_WIRE_3_2, _allocatable_slots_WIRE_3_1}; // @[tage.scala:302:{14,70}] wire [2:0] allocatable_slots_lo_3 = {allocatable_slots_lo_hi_3, _allocatable_slots_WIRE_3_0}; // @[tage.scala:302:{14,70}] wire [1:0] allocatable_slots_hi_hi_3 = {_allocatable_slots_WIRE_3_5, _allocatable_slots_WIRE_3_4}; // @[tage.scala:302:{14,70}] wire [2:0] allocatable_slots_hi_3 = {allocatable_slots_hi_hi_3, _allocatable_slots_WIRE_3_3}; // @[tage.scala:302:{14,70}] wire [5:0] _allocatable_slots_T_132 = {allocatable_slots_hi_3, allocatable_slots_lo_3}; // @[tage.scala:302:70] wire [7:0] _allocatable_slots_T_133 = 8'h1 << f3_meta_provider_3_bits; // @[OneHot.scala:58:35] wire [7:0] _allocatable_slots_T_134 = _allocatable_slots_T_133; // @[OneHot.scala:58:35] wire [7:0] _allocatable_slots_T_135 = {1'h0, _allocatable_slots_T_133[7:1]}; // @[OneHot.scala:58:35] wire [7:0] _allocatable_slots_T_136 = {2'h0, _allocatable_slots_T_133[7:2]}; // @[OneHot.scala:58:35] wire [7:0] _allocatable_slots_T_137 = {3'h0, _allocatable_slots_T_133[7:3]}; // @[OneHot.scala:58:35] wire [7:0] _allocatable_slots_T_138 = {4'h0, _allocatable_slots_T_133[7:4]}; // @[OneHot.scala:58:35] wire [7:0] _allocatable_slots_T_139 = {5'h0, _allocatable_slots_T_133[7:5]}; // @[OneHot.scala:58:35] wire [7:0] _allocatable_slots_T_140 = {6'h0, _allocatable_slots_T_133[7:6]}; // @[OneHot.scala:58:35] wire [7:0] _allocatable_slots_T_141 = {7'h0, _allocatable_slots_T_133[7]}; // @[OneHot.scala:58:35] wire [7:0] _allocatable_slots_T_142 = _allocatable_slots_T_134 | _allocatable_slots_T_135; // @[util.scala:383:{29,45}] wire [7:0] _allocatable_slots_T_143 = _allocatable_slots_T_142 | _allocatable_slots_T_136; // @[util.scala:383:{29,45}] wire [7:0] _allocatable_slots_T_144 = _allocatable_slots_T_143 | _allocatable_slots_T_137; // @[util.scala:383:{29,45}] wire [7:0] _allocatable_slots_T_145 = _allocatable_slots_T_144 | _allocatable_slots_T_138; // @[util.scala:383:{29,45}] wire [7:0] _allocatable_slots_T_146 = _allocatable_slots_T_145 | _allocatable_slots_T_139; // @[util.scala:383:{29,45}] wire [7:0] _allocatable_slots_T_147 = _allocatable_slots_T_146 | _allocatable_slots_T_140; // @[util.scala:383:{29,45}] wire [7:0] _allocatable_slots_T_148 = _allocatable_slots_T_147 | _allocatable_slots_T_141; // @[util.scala:383:{29,45}] wire [5:0] _allocatable_slots_T_149 = {6{f3_meta_provider_3_valid}}; // @[tage.scala:223:21, :303:61] wire [7:0] _allocatable_slots_T_150 = {2'h0, _allocatable_slots_T_148[5:0] & _allocatable_slots_T_149}; // @[util.scala:383:45] wire [7:0] 
_allocatable_slots_T_151 = ~_allocatable_slots_T_150; // @[tage.scala:303:{7,55}] wire [7:0] allocatable_slots_3 = {2'h0, _allocatable_slots_T_151[5:0] & _allocatable_slots_T_132}; // @[tage.scala:302:{70,77}, :303:7] wire [1:0] alloc_lfsr_lo_hi_3 = {_alloc_lfsr_prng_3_io_out_2, _alloc_lfsr_prng_3_io_out_1}; // @[PRNG.scala:91:22, :95:17] wire [2:0] alloc_lfsr_lo_3 = {alloc_lfsr_lo_hi_3, _alloc_lfsr_prng_3_io_out_0}; // @[PRNG.scala:91:22, :95:17] wire [1:0] alloc_lfsr_hi_hi_3 = {_alloc_lfsr_prng_3_io_out_5, _alloc_lfsr_prng_3_io_out_4}; // @[PRNG.scala:91:22, :95:17] wire [2:0] alloc_lfsr_hi_3 = {alloc_lfsr_hi_hi_3, _alloc_lfsr_prng_3_io_out_3}; // @[PRNG.scala:91:22, :95:17] wire [5:0] alloc_lfsr_3 = {alloc_lfsr_hi_3, alloc_lfsr_lo_3}; // @[PRNG.scala:95:17] wire _first_entry_T_42 = allocatable_slots_3[0]; // @[OneHot.scala:48:45] wire _first_entry_T_43 = allocatable_slots_3[1]; // @[OneHot.scala:48:45] wire _first_entry_T_44 = allocatable_slots_3[2]; // @[OneHot.scala:48:45] wire _first_entry_T_45 = allocatable_slots_3[3]; // @[OneHot.scala:48:45] wire _first_entry_T_46 = allocatable_slots_3[4]; // @[OneHot.scala:48:45] wire _first_entry_T_47 = allocatable_slots_3[5]; // @[OneHot.scala:48:45] wire _first_entry_T_48 = allocatable_slots_3[6]; // @[OneHot.scala:48:45] wire _first_entry_T_49 = allocatable_slots_3[7]; // @[OneHot.scala:48:45] wire [2:0] _first_entry_T_50 = {2'h3, ~_first_entry_T_48}; // @[OneHot.scala:48:45] wire [2:0] _first_entry_T_51 = _first_entry_T_47 ? 3'h5 : _first_entry_T_50; // @[OneHot.scala:48:45] wire [2:0] _first_entry_T_52 = _first_entry_T_46 ? 3'h4 : _first_entry_T_51; // @[OneHot.scala:48:45] wire [2:0] _first_entry_T_53 = _first_entry_T_45 ? 3'h3 : _first_entry_T_52; // @[OneHot.scala:48:45] wire [2:0] _first_entry_T_54 = _first_entry_T_44 ? 3'h2 : _first_entry_T_53; // @[OneHot.scala:48:45] wire [2:0] _first_entry_T_55 = _first_entry_T_43 ? 3'h1 : _first_entry_T_54; // @[OneHot.scala:48:45] wire [2:0] first_entry_3 = _first_entry_T_42 ? 3'h0 : _first_entry_T_55; // @[OneHot.scala:48:45] wire [7:0] _masked_entry_T_45 = {2'h0, allocatable_slots_3[5:0] & alloc_lfsr_3}; // @[PRNG.scala:95:17] wire _masked_entry_T_46 = _masked_entry_T_45[0]; // @[OneHot.scala:48:45] wire _masked_entry_T_47 = _masked_entry_T_45[1]; // @[OneHot.scala:48:45] wire _masked_entry_T_48 = _masked_entry_T_45[2]; // @[OneHot.scala:48:45] wire _masked_entry_T_49 = _masked_entry_T_45[3]; // @[OneHot.scala:48:45] wire _masked_entry_T_50 = _masked_entry_T_45[4]; // @[OneHot.scala:48:45] wire _masked_entry_T_51 = _masked_entry_T_45[5]; // @[OneHot.scala:48:45] wire _masked_entry_T_52 = _masked_entry_T_45[6]; // @[OneHot.scala:48:45] wire _masked_entry_T_53 = _masked_entry_T_45[7]; // @[OneHot.scala:48:45] wire [2:0] _masked_entry_T_54 = {2'h3, ~_masked_entry_T_52}; // @[OneHot.scala:48:45] wire [2:0] _masked_entry_T_55 = _masked_entry_T_51 ? 3'h5 : _masked_entry_T_54; // @[OneHot.scala:48:45] wire [2:0] _masked_entry_T_56 = _masked_entry_T_50 ? 3'h4 : _masked_entry_T_55; // @[OneHot.scala:48:45] wire [2:0] _masked_entry_T_57 = _masked_entry_T_49 ? 3'h3 : _masked_entry_T_56; // @[OneHot.scala:48:45] wire [2:0] _masked_entry_T_58 = _masked_entry_T_48 ? 3'h2 : _masked_entry_T_57; // @[OneHot.scala:48:45] wire [2:0] _masked_entry_T_59 = _masked_entry_T_47 ? 3'h1 : _masked_entry_T_58; // @[OneHot.scala:48:45] wire [2:0] masked_entry_3 = _masked_entry_T_46 ? 
3'h0 : _masked_entry_T_59; // @[OneHot.scala:48:45] wire [7:0] _alloc_entry_T_6 = allocatable_slots_3 >> masked_entry_3; // @[Mux.scala:50:70] wire _alloc_entry_T_7 = _alloc_entry_T_6[0]; // @[tage.scala:309:44] assign alloc_entry_3 = _alloc_entry_T_7 ? masked_entry_3 : first_entry_3; // @[Mux.scala:50:70] assign f3_meta_allocate_3_bits = alloc_entry_3; // @[tage.scala:223:21, :309:26] assign _f3_meta_allocate_3_valid_T = |allocatable_slots_3; // @[tage.scala:302:77, :313:52] assign f3_meta_allocate_3_valid = _f3_meta_allocate_3_valid_T; // @[tage.scala:223:21, :313:52] wire _update_was_taken_T_6 = &s1_update_bits_cfi_idx_bits; // @[tage.scala:317:58] wire _update_was_taken_T_7 = s1_update_bits_cfi_idx_valid & _update_was_taken_T_6; // @[tage.scala:316:58, :317:58] wire update_was_taken_3 = _update_was_taken_T_7 & s1_update_bits_cfi_taken; // @[tage.scala:316:58, :317:67] wire _T_161 = s1_update_bits_br_mask[3] & s1_update_valid & _GEN_5 == 5'h0; // @[OneHot.scala:58:35] wire _GEN_27 = _T_161 & s1_update_meta_provider_3_valid & s1_update_meta_provider_3_bits == 3'h0; // @[tage.scala:248:52, :252:33, :319:{37,56,92}, :320:47, :323:37] wire _GEN_28 = _T_161 & s1_update_meta_provider_3_valid & s1_update_meta_provider_3_bits == 3'h1; // @[tage.scala:229:43, :248:52, :252:33, :319:{37,56,92}, :320:47, :323:37] wire _GEN_29 = _T_161 & s1_update_meta_provider_3_valid & s1_update_meta_provider_3_bits == 3'h2; // @[tage.scala:248:52, :252:33, :319:{37,56,92}, :320:47, :323:37] wire _GEN_30 = _T_161 & s1_update_meta_provider_3_valid & s1_update_meta_provider_3_bits == 3'h3; // @[tage.scala:248:52, :252:33, :288:20, :319:{37,56,92}, :320:47, :323:37] wire _GEN_31 = _T_161 & s1_update_meta_provider_3_valid & s1_update_meta_provider_3_bits == 3'h4; // @[tage.scala:248:52, :252:33, :319:{37,56,92}, :320:47, :323:37] wire _GEN_32 = _T_161 & s1_update_meta_provider_3_valid & s1_update_meta_provider_3_bits == 3'h5; // @[tage.scala:248:52, :252:33, :319:{37,56,92}, :320:47, :323:37] wire _new_u_T_33 = s1_update_mispredict_mask[3]; // @[tage.scala:249:73, :328:52] wire _new_u_T_34 = ~s1_update_meta_alt_differs_3; // @[tage.scala:228:9, :248:52] wire _new_u_T_35 = s1_update_meta_provider_u_3 == 2'h0; // @[tage.scala:229:27, :248:52] wire [2:0] _GEN_33 = {1'h0, s1_update_meta_provider_u_3}; // @[tage.scala:229:43, :248:52] wire [2:0] _new_u_T_36 = _GEN_33 - 3'h1; // @[tage.scala:229:43] wire [1:0] _new_u_T_37 = _new_u_T_36[1:0]; // @[tage.scala:229:43] wire [1:0] _new_u_T_38 = _new_u_T_35 ? 2'h0 : _new_u_T_37; // @[tage.scala:229:{24,27,43}] wire _new_u_T_39 = &s1_update_meta_provider_u_3; // @[tage.scala:230:27, :248:52] wire [2:0] _new_u_T_40 = _GEN_33 + 3'h1; // @[tage.scala:229:43, :230:43] wire [1:0] _new_u_T_41 = _new_u_T_40[1:0]; // @[tage.scala:230:43] wire [1:0] _new_u_T_42 = _new_u_T_39 ? 2'h3 : _new_u_T_41; // @[tage.scala:230:{24,27,43}] wire [1:0] _new_u_T_43 = _new_u_T_33 ? _new_u_T_38 : _new_u_T_42; // @[tage.scala:229:{8,24}, :230:24, :328:52] wire [1:0] new_u_3 = _new_u_T_34 ? 
s1_update_meta_provider_u_3 : _new_u_T_43; // @[tage.scala:228:{8,9}, :229:8, :248:52] wire _T_168 = s1_update_valid & _GEN_5 == 5'h0 & s1_update_bits_cfi_mispredicted & s1_update_bits_cfi_idx_valid; // @[OneHot.scala:58:35] wire [3:0] _GEN_34 = {{s1_update_meta_allocate_3_valid}, {s1_update_meta_allocate_2_valid}, {s1_update_meta_allocate_1_valid}, {s1_update_meta_allocate_0_valid}}; // @[tage.scala:248:52, :340:27] wire _GEN_35 = _GEN_34[s1_update_bits_cfi_idx_bits]; // @[tage.scala:340:27] wire [3:0][2:0] _GEN_36 = {{s1_update_meta_allocate_3_bits}, {s1_update_meta_allocate_2_bits}, {s1_update_meta_allocate_1_bits}, {s1_update_meta_allocate_0_bits}}; // @[tage.scala:248:52, :340:27] wire [2:0] _GEN_37 = _GEN_36[s1_update_bits_cfi_idx_bits]; // @[tage.scala:340:27] wire _GEN_38 = _GEN_37 == 3'h0; // @[tage.scala:340:27, :341:43] wire _GEN_39 = _T_168 & _GEN_35 & _GEN_38; // @[tage.scala:319:92, :337:{25,60,95,128}, :340:27, :341:43] assign s1_update_alloc_0_0 = _GEN_39 & _update_was_taken_T; // @[tage.scala:257:31, :317:58, :319:92, :337:128, :340:27, :341:43] assign s1_update_mask_0_0 = s1_update_alloc_0_0 | _GEN_6; // @[tage.scala:252:33, :257:31, :319:92, :320:47, :323:37, :337:128, :340:27, :341:43] assign s1_update_alloc_0_1 = _GEN_39 & _update_was_taken_T_2; // @[tage.scala:257:31, :317:58, :319:92, :337:128, :340:27, :341:43] assign s1_update_mask_0_1 = s1_update_alloc_0_1 | _GEN_13; // @[tage.scala:252:33, :257:31, :319:92, :320:47, :323:37, :337:128, :340:27, :341:43] assign s1_update_alloc_0_2 = _GEN_39 & _update_was_taken_T_4; // @[tage.scala:257:31, :317:58, :319:92, :337:128, :340:27, :341:43] assign s1_update_mask_0_2 = s1_update_alloc_0_2 | _GEN_20; // @[tage.scala:252:33, :257:31, :319:92, :320:47, :323:37, :337:128, :340:27, :341:43] assign s1_update_alloc_0_3 = _GEN_39 & (&s1_update_bits_cfi_idx_bits); // @[tage.scala:257:31, :317:58, :319:92, :337:128, :340:27, :341:43] assign s1_update_mask_0_3 = s1_update_alloc_0_3 | _GEN_27; // @[tage.scala:252:33, :257:31, :319:92, :320:47, :323:37, :337:128, :340:27, :341:43] wire _GEN_40 = _GEN_37 == 3'h1; // @[tage.scala:229:43, :340:27, :341:43] wire _GEN_41 = _T_168 & _GEN_35 & _GEN_40; // @[tage.scala:319:92, :337:{25,60,95,128}, :340:27, :341:43] assign s1_update_alloc_1_0 = _GEN_41 & _update_was_taken_T; // @[tage.scala:257:31, :317:58, :319:92, :337:128, :340:27, :341:43] assign s1_update_mask_1_0 = s1_update_alloc_1_0 | _GEN_7; // @[tage.scala:252:33, :257:31, :319:92, :320:47, :323:37, :337:128, :340:27, :341:43] assign s1_update_alloc_1_1 = _GEN_41 & _update_was_taken_T_2; // @[tage.scala:257:31, :317:58, :319:92, :337:128, :340:27, :341:43] assign s1_update_mask_1_1 = s1_update_alloc_1_1 | _GEN_14; // @[tage.scala:252:33, :257:31, :319:92, :320:47, :323:37, :337:128, :340:27, :341:43] assign s1_update_alloc_1_2 = _GEN_41 & _update_was_taken_T_4; // @[tage.scala:257:31, :317:58, :319:92, :337:128, :340:27, :341:43] assign s1_update_mask_1_2 = s1_update_alloc_1_2 | _GEN_21; // @[tage.scala:252:33, :257:31, :319:92, :320:47, :323:37, :337:128, :340:27, :341:43] assign s1_update_alloc_1_3 = _GEN_41 & (&s1_update_bits_cfi_idx_bits); // @[tage.scala:257:31, :317:58, :319:92, :337:128, :340:27, :341:43] assign s1_update_mask_1_3 = s1_update_alloc_1_3 | _GEN_28; // @[tage.scala:252:33, :257:31, :319:92, :320:47, :323:37, :337:128, :340:27, :341:43] wire _GEN_42 = _GEN_37 == 3'h2; // @[tage.scala:340:27, :341:43] wire _GEN_43 = _T_168 & _GEN_35 & _GEN_42; // @[tage.scala:319:92, :337:{25,60,95,128}, :340:27, :341:43] assign 
s1_update_alloc_2_0 = _GEN_43 & _update_was_taken_T; // @[tage.scala:257:31, :317:58, :319:92, :337:128, :340:27, :341:43] assign s1_update_mask_2_0 = s1_update_alloc_2_0 | _GEN_8; // @[tage.scala:252:33, :257:31, :319:92, :320:47, :323:37, :337:128, :340:27, :341:43] assign s1_update_alloc_2_1 = _GEN_43 & _update_was_taken_T_2; // @[tage.scala:257:31, :317:58, :319:92, :337:128, :340:27, :341:43] assign s1_update_mask_2_1 = s1_update_alloc_2_1 | _GEN_15; // @[tage.scala:252:33, :257:31, :319:92, :320:47, :323:37, :337:128, :340:27, :341:43] assign s1_update_alloc_2_2 = _GEN_43 & _update_was_taken_T_4; // @[tage.scala:257:31, :317:58, :319:92, :337:128, :340:27, :341:43] assign s1_update_mask_2_2 = s1_update_alloc_2_2 | _GEN_22; // @[tage.scala:252:33, :257:31, :319:92, :320:47, :323:37, :337:128, :340:27, :341:43] assign s1_update_alloc_2_3 = _GEN_43 & (&s1_update_bits_cfi_idx_bits); // @[tage.scala:257:31, :317:58, :319:92, :337:128, :340:27, :341:43] assign s1_update_mask_2_3 = s1_update_alloc_2_3 | _GEN_29; // @[tage.scala:252:33, :257:31, :319:92, :320:47, :323:37, :337:128, :340:27, :341:43] wire _GEN_44 = _GEN_37 == 3'h3; // @[tage.scala:288:20, :340:27, :341:43] wire _GEN_45 = _T_168 & _GEN_35 & _GEN_44; // @[tage.scala:319:92, :337:{25,60,95,128}, :340:27, :341:43] assign s1_update_alloc_3_0 = _GEN_45 & _update_was_taken_T; // @[tage.scala:257:31, :317:58, :319:92, :337:128, :340:27, :341:43] assign s1_update_mask_3_0 = s1_update_alloc_3_0 | _GEN_9; // @[tage.scala:252:33, :257:31, :319:92, :320:47, :323:37, :337:128, :340:27, :341:43] assign s1_update_alloc_3_1 = _GEN_45 & _update_was_taken_T_2; // @[tage.scala:257:31, :317:58, :319:92, :337:128, :340:27, :341:43] assign s1_update_mask_3_1 = s1_update_alloc_3_1 | _GEN_16; // @[tage.scala:252:33, :257:31, :319:92, :320:47, :323:37, :337:128, :340:27, :341:43] assign s1_update_alloc_3_2 = _GEN_45 & _update_was_taken_T_4; // @[tage.scala:257:31, :317:58, :319:92, :337:128, :340:27, :341:43] assign s1_update_mask_3_2 = s1_update_alloc_3_2 | _GEN_23; // @[tage.scala:252:33, :257:31, :319:92, :320:47, :323:37, :337:128, :340:27, :341:43] assign s1_update_alloc_3_3 = _GEN_45 & (&s1_update_bits_cfi_idx_bits); // @[tage.scala:257:31, :317:58, :319:92, :337:128, :340:27, :341:43] assign s1_update_mask_3_3 = s1_update_alloc_3_3 | _GEN_30; // @[tage.scala:252:33, :257:31, :319:92, :320:47, :323:37, :337:128, :340:27, :341:43] wire _GEN_46 = _GEN_37 == 3'h4; // @[tage.scala:340:27, :341:43] wire _GEN_47 = _T_168 & _GEN_35 & _GEN_46; // @[tage.scala:319:92, :337:{25,60,95,128}, :340:27, :341:43] assign s1_update_alloc_4_0 = _GEN_47 & _update_was_taken_T; // @[tage.scala:257:31, :317:58, :319:92, :337:128, :340:27, :341:43] assign s1_update_mask_4_0 = s1_update_alloc_4_0 | _GEN_10; // @[tage.scala:252:33, :257:31, :319:92, :320:47, :323:37, :337:128, :340:27, :341:43] assign s1_update_alloc_4_1 = _GEN_47 & _update_was_taken_T_2; // @[tage.scala:257:31, :317:58, :319:92, :337:128, :340:27, :341:43] assign s1_update_mask_4_1 = s1_update_alloc_4_1 | _GEN_17; // @[tage.scala:252:33, :257:31, :319:92, :320:47, :323:37, :337:128, :340:27, :341:43] assign s1_update_alloc_4_2 = _GEN_47 & _update_was_taken_T_4; // @[tage.scala:257:31, :317:58, :319:92, :337:128, :340:27, :341:43] assign s1_update_mask_4_2 = s1_update_alloc_4_2 | _GEN_24; // @[tage.scala:252:33, :257:31, :319:92, :320:47, :323:37, :337:128, :340:27, :341:43] assign s1_update_alloc_4_3 = _GEN_47 & (&s1_update_bits_cfi_idx_bits); // @[tage.scala:257:31, :317:58, :319:92, :337:128, :340:27, 
:341:43] assign s1_update_mask_4_3 = s1_update_alloc_4_3 | _GEN_31; // @[tage.scala:252:33, :257:31, :319:92, :320:47, :323:37, :337:128, :340:27, :341:43] wire _GEN_48 = _GEN_37 == 3'h5; // @[tage.scala:340:27, :341:43] wire _GEN_49 = _T_168 & _GEN_35 & _GEN_48; // @[tage.scala:319:92, :337:{25,60,95,128}, :340:27, :341:43] assign s1_update_alloc_5_0 = _GEN_49 & _update_was_taken_T; // @[tage.scala:257:31, :317:58, :319:92, :337:128, :340:27, :341:43] assign s1_update_mask_5_0 = s1_update_alloc_5_0 | _GEN_11; // @[tage.scala:252:33, :257:31, :319:92, :320:47, :323:37, :337:128, :340:27, :341:43] assign s1_update_alloc_5_1 = _GEN_49 & _update_was_taken_T_2; // @[tage.scala:257:31, :317:58, :319:92, :337:128, :340:27, :341:43] assign s1_update_mask_5_1 = s1_update_alloc_5_1 | _GEN_18; // @[tage.scala:252:33, :257:31, :319:92, :320:47, :323:37, :337:128, :340:27, :341:43] assign s1_update_alloc_5_2 = _GEN_49 & _update_was_taken_T_4; // @[tage.scala:257:31, :317:58, :319:92, :337:128, :340:27, :341:43] assign s1_update_mask_5_2 = s1_update_alloc_5_2 | _GEN_25; // @[tage.scala:252:33, :257:31, :319:92, :320:47, :323:37, :337:128, :340:27, :341:43] assign s1_update_alloc_5_3 = _GEN_49 & (&s1_update_bits_cfi_idx_bits); // @[tage.scala:257:31, :317:58, :319:92, :337:128, :340:27, :341:43] assign s1_update_mask_5_3 = s1_update_alloc_5_3 | _GEN_32; // @[tage.scala:252:33, :257:31, :319:92, :320:47, :323:37, :337:128, :340:27, :341:43] wire _GEN_50 = _GEN_38 & _update_was_taken_T; // @[tage.scala:317:58, :319:92, :341:43, :342:43] assign s1_update_taken_0_0 = _T_168 & _GEN_35 & _GEN_50 ? s1_update_bits_cfi_taken : update_was_taken; // @[tage.scala:255:31, :317:67, :319:92, :337:{25,60,95,128}, :340:27, :342:43] wire _GEN_51 = _GEN_38 & _update_was_taken_T_2; // @[tage.scala:317:58, :319:92, :341:43, :342:43] assign s1_update_taken_0_1 = _T_168 & _GEN_35 & _GEN_51 ? s1_update_bits_cfi_taken : update_was_taken_1; // @[tage.scala:255:31, :317:67, :319:92, :337:{25,60,95,128}, :340:27, :342:43] wire _GEN_52 = _GEN_38 & _update_was_taken_T_4; // @[tage.scala:317:58, :319:92, :341:43, :342:43] assign s1_update_taken_0_2 = _T_168 & _GEN_35 & _GEN_52 ? s1_update_bits_cfi_taken : update_was_taken_2; // @[tage.scala:255:31, :317:67, :319:92, :337:{25,60,95,128}, :340:27, :342:43] wire _GEN_53 = _GEN_38 & (&s1_update_bits_cfi_idx_bits); // @[tage.scala:317:58, :319:92, :341:43, :342:43] assign s1_update_taken_0_3 = _T_168 & _GEN_35 & _GEN_53 ? s1_update_bits_cfi_taken : update_was_taken_3; // @[tage.scala:255:31, :317:67, :319:92, :337:{25,60,95,128}, :340:27, :342:43] wire _GEN_54 = _GEN_40 & _update_was_taken_T; // @[tage.scala:317:58, :319:92, :341:43, :342:43] assign s1_update_taken_1_0 = _T_168 & _GEN_35 & _GEN_54 ? s1_update_bits_cfi_taken : update_was_taken; // @[tage.scala:255:31, :317:67, :319:92, :337:{25,60,95,128}, :340:27, :342:43] wire _GEN_55 = _GEN_40 & _update_was_taken_T_2; // @[tage.scala:317:58, :319:92, :341:43, :342:43] assign s1_update_taken_1_1 = _T_168 & _GEN_35 & _GEN_55 ? s1_update_bits_cfi_taken : update_was_taken_1; // @[tage.scala:255:31, :317:67, :319:92, :337:{25,60,95,128}, :340:27, :342:43] wire _GEN_56 = _GEN_40 & _update_was_taken_T_4; // @[tage.scala:317:58, :319:92, :341:43, :342:43] assign s1_update_taken_1_2 = _T_168 & _GEN_35 & _GEN_56 ? 
s1_update_bits_cfi_taken : update_was_taken_2; // @[tage.scala:255:31, :317:67, :319:92, :337:{25,60,95,128}, :340:27, :342:43] wire _GEN_57 = _GEN_40 & (&s1_update_bits_cfi_idx_bits); // @[tage.scala:317:58, :319:92, :341:43, :342:43] assign s1_update_taken_1_3 = _T_168 & _GEN_35 & _GEN_57 ? s1_update_bits_cfi_taken : update_was_taken_3; // @[tage.scala:255:31, :317:67, :319:92, :337:{25,60,95,128}, :340:27, :342:43] wire _GEN_58 = _GEN_42 & _update_was_taken_T; // @[tage.scala:317:58, :319:92, :341:43, :342:43] assign s1_update_taken_2_0 = _T_168 & _GEN_35 & _GEN_58 ? s1_update_bits_cfi_taken : update_was_taken; // @[tage.scala:255:31, :317:67, :319:92, :337:{25,60,95,128}, :340:27, :342:43] wire _GEN_59 = _GEN_42 & _update_was_taken_T_2; // @[tage.scala:317:58, :319:92, :341:43, :342:43] assign s1_update_taken_2_1 = _T_168 & _GEN_35 & _GEN_59 ? s1_update_bits_cfi_taken : update_was_taken_1; // @[tage.scala:255:31, :317:67, :319:92, :337:{25,60,95,128}, :340:27, :342:43] wire _GEN_60 = _GEN_42 & _update_was_taken_T_4; // @[tage.scala:317:58, :319:92, :341:43, :342:43] assign s1_update_taken_2_2 = _T_168 & _GEN_35 & _GEN_60 ? s1_update_bits_cfi_taken : update_was_taken_2; // @[tage.scala:255:31, :317:67, :319:92, :337:{25,60,95,128}, :340:27, :342:43] wire _GEN_61 = _GEN_42 & (&s1_update_bits_cfi_idx_bits); // @[tage.scala:317:58, :319:92, :341:43, :342:43] assign s1_update_taken_2_3 = _T_168 & _GEN_35 & _GEN_61 ? s1_update_bits_cfi_taken : update_was_taken_3; // @[tage.scala:255:31, :317:67, :319:92, :337:{25,60,95,128}, :340:27, :342:43] wire _GEN_62 = _GEN_44 & _update_was_taken_T; // @[tage.scala:317:58, :319:92, :341:43, :342:43] assign s1_update_taken_3_0 = _T_168 & _GEN_35 & _GEN_62 ? s1_update_bits_cfi_taken : update_was_taken; // @[tage.scala:255:31, :317:67, :319:92, :337:{25,60,95,128}, :340:27, :342:43] wire _GEN_63 = _GEN_44 & _update_was_taken_T_2; // @[tage.scala:317:58, :319:92, :341:43, :342:43] assign s1_update_taken_3_1 = _T_168 & _GEN_35 & _GEN_63 ? s1_update_bits_cfi_taken : update_was_taken_1; // @[tage.scala:255:31, :317:67, :319:92, :337:{25,60,95,128}, :340:27, :342:43] wire _GEN_64 = _GEN_44 & _update_was_taken_T_4; // @[tage.scala:317:58, :319:92, :341:43, :342:43] assign s1_update_taken_3_2 = _T_168 & _GEN_35 & _GEN_64 ? s1_update_bits_cfi_taken : update_was_taken_2; // @[tage.scala:255:31, :317:67, :319:92, :337:{25,60,95,128}, :340:27, :342:43] wire _GEN_65 = _GEN_44 & (&s1_update_bits_cfi_idx_bits); // @[tage.scala:317:58, :319:92, :341:43, :342:43] assign s1_update_taken_3_3 = _T_168 & _GEN_35 & _GEN_65 ? s1_update_bits_cfi_taken : update_was_taken_3; // @[tage.scala:255:31, :317:67, :319:92, :337:{25,60,95,128}, :340:27, :342:43] wire _GEN_66 = _GEN_46 & _update_was_taken_T; // @[tage.scala:317:58, :319:92, :341:43, :342:43] assign s1_update_taken_4_0 = _T_168 & _GEN_35 & _GEN_66 ? s1_update_bits_cfi_taken : update_was_taken; // @[tage.scala:255:31, :317:67, :319:92, :337:{25,60,95,128}, :340:27, :342:43] wire _GEN_67 = _GEN_46 & _update_was_taken_T_2; // @[tage.scala:317:58, :319:92, :341:43, :342:43] assign s1_update_taken_4_1 = _T_168 & _GEN_35 & _GEN_67 ? s1_update_bits_cfi_taken : update_was_taken_1; // @[tage.scala:255:31, :317:67, :319:92, :337:{25,60,95,128}, :340:27, :342:43] wire _GEN_68 = _GEN_46 & _update_was_taken_T_4; // @[tage.scala:317:58, :319:92, :341:43, :342:43] assign s1_update_taken_4_2 = _T_168 & _GEN_35 & _GEN_68 ? 
s1_update_bits_cfi_taken : update_was_taken_2; // @[tage.scala:255:31, :317:67, :319:92, :337:{25,60,95,128}, :340:27, :342:43] wire _GEN_69 = _GEN_46 & (&s1_update_bits_cfi_idx_bits); // @[tage.scala:317:58, :319:92, :341:43, :342:43] assign s1_update_taken_4_3 = _T_168 & _GEN_35 & _GEN_69 ? s1_update_bits_cfi_taken : update_was_taken_3; // @[tage.scala:255:31, :317:67, :319:92, :337:{25,60,95,128}, :340:27, :342:43] wire _GEN_70 = _GEN_48 & _update_was_taken_T; // @[tage.scala:317:58, :319:92, :341:43, :342:43] assign s1_update_taken_5_0 = _T_168 & _GEN_35 & _GEN_70 ? s1_update_bits_cfi_taken : update_was_taken; // @[tage.scala:255:31, :317:67, :319:92, :337:{25,60,95,128}, :340:27, :342:43] wire _GEN_71 = _GEN_48 & _update_was_taken_T_2; // @[tage.scala:317:58, :319:92, :341:43, :342:43] assign s1_update_taken_5_1 = _T_168 & _GEN_35 & _GEN_71 ? s1_update_bits_cfi_taken : update_was_taken_1; // @[tage.scala:255:31, :317:67, :319:92, :337:{25,60,95,128}, :340:27, :342:43] wire _GEN_72 = _GEN_48 & _update_was_taken_T_4; // @[tage.scala:317:58, :319:92, :341:43, :342:43] assign s1_update_taken_5_2 = _T_168 & _GEN_35 & _GEN_72 ? s1_update_bits_cfi_taken : update_was_taken_2; // @[tage.scala:255:31, :317:67, :319:92, :337:{25,60,95,128}, :340:27, :342:43] wire _GEN_73 = _GEN_48 & (&s1_update_bits_cfi_idx_bits); // @[tage.scala:317:58, :319:92, :341:43, :342:43] assign s1_update_taken_5_3 = _T_168 & _GEN_35 & _GEN_73 ? s1_update_bits_cfi_taken : update_was_taken_3; // @[tage.scala:255:31, :317:67, :319:92, :337:{25,60,95,128}, :340:27, :342:43] wire [3:0] _GEN_74 = {{s1_update_meta_provider_3_valid}, {s1_update_meta_provider_2_valid}, {s1_update_meta_provider_1_valid}, {s1_update_meta_provider_0_valid}}; // @[OneHot.scala:58:35] wire [3:0][2:0] _GEN_75 = {{s1_update_meta_provider_3_bits}, {s1_update_meta_provider_2_bits}, {s1_update_meta_provider_1_bits}, {s1_update_meta_provider_0_bits}}; // @[OneHot.scala:58:35] wire [7:0] _decr_mask_T = 8'h1 << _GEN_75[s1_update_bits_cfi_idx_bits]; // @[OneHot.scala:58:35] wire [7:0] _decr_mask_T_1 = _decr_mask_T; // @[OneHot.scala:58:35] wire [7:0] _decr_mask_T_2 = {1'h0, _decr_mask_T[7:1]}; // @[OneHot.scala:58:35] wire [7:0] _decr_mask_T_3 = {2'h0, _decr_mask_T[7:2]}; // @[OneHot.scala:58:35] wire [7:0] _decr_mask_T_4 = {3'h0, _decr_mask_T[7:3]}; // @[OneHot.scala:58:35] wire [7:0] _decr_mask_T_5 = {4'h0, _decr_mask_T[7:4]}; // @[OneHot.scala:58:35] wire [7:0] _decr_mask_T_6 = {5'h0, _decr_mask_T[7:5]}; // @[OneHot.scala:58:35] wire [7:0] _decr_mask_T_7 = {6'h0, _decr_mask_T[7:6]}; // @[OneHot.scala:58:35] wire [7:0] _decr_mask_T_8 = {7'h0, _decr_mask_T[7]}; // @[OneHot.scala:58:35] wire [7:0] _decr_mask_T_9 = _decr_mask_T_1 | _decr_mask_T_2; // @[util.scala:383:{29,45}] wire [7:0] _decr_mask_T_10 = _decr_mask_T_9 | _decr_mask_T_3; // @[util.scala:383:{29,45}] wire [7:0] _decr_mask_T_11 = _decr_mask_T_10 | _decr_mask_T_4; // @[util.scala:383:{29,45}] wire [7:0] _decr_mask_T_12 = _decr_mask_T_11 | _decr_mask_T_5; // @[util.scala:383:{29,45}] wire [7:0] _decr_mask_T_13 = _decr_mask_T_12 | _decr_mask_T_6; // @[util.scala:383:{29,45}] wire [7:0] _decr_mask_T_14 = _decr_mask_T_13 | _decr_mask_T_7; // @[util.scala:383:{29,45}] wire [7:0] _decr_mask_T_15 = _decr_mask_T_14 | _decr_mask_T_8; // @[util.scala:383:{29,45}] wire [7:0] _decr_mask_T_16 = ~_decr_mask_T_15; // @[util.scala:383:45] wire [7:0] decr_mask = _GEN_74[s1_update_bits_cfi_idx_bits] ? _decr_mask_T_16 : 8'h0; // @[OneHot.scala:58:35] assign s1_update_u_mask_0_0 = _T_168 ? (_GEN_35 ? 
_GEN_38 & _update_was_taken_T | _GEN_6 : decr_mask[0] & _update_was_taken_T | _GEN_6) : _GEN_6; // @[tage.scala:252:33, :253:35, :317:58, :319:92, :320:47, :323:37, :337:{25,60,95,128}, :340:27, :341:43, :345:44, :350:26, :353:{24,29}, :354:36] assign s1_update_u_mask_0_1 = _T_168 ? (_GEN_35 ? _GEN_38 & _update_was_taken_T_2 | _GEN_13 : decr_mask[0] & _update_was_taken_T_2 | _GEN_13) : _GEN_13; // @[tage.scala:252:33, :253:35, :317:58, :319:92, :320:47, :323:37, :337:{25,60,95,128}, :340:27, :341:43, :345:44, :350:26, :353:{24,29}, :354:36] assign s1_update_u_mask_0_2 = _T_168 ? (_GEN_35 ? _GEN_38 & _update_was_taken_T_4 | _GEN_20 : decr_mask[0] & _update_was_taken_T_4 | _GEN_20) : _GEN_20; // @[tage.scala:252:33, :253:35, :317:58, :319:92, :320:47, :323:37, :337:{25,60,95,128}, :340:27, :341:43, :345:44, :350:26, :353:{24,29}, :354:36] assign s1_update_u_mask_0_3 = _T_168 ? (_GEN_35 ? _GEN_38 & (&s1_update_bits_cfi_idx_bits) | _GEN_27 : decr_mask[0] & (&s1_update_bits_cfi_idx_bits) | _GEN_27) : _GEN_27; // @[tage.scala:252:33, :253:35, :317:58, :319:92, :320:47, :323:37, :337:{25,60,95,128}, :340:27, :341:43, :345:44, :350:26, :353:{24,29}, :354:36] assign s1_update_u_0_0 = _T_168 & (_GEN_35 ? _GEN_50 : decr_mask[0] & _update_was_taken_T) ? 2'h0 : new_u; // @[tage.scala:228:8, :258:31, :317:58, :319:92, :337:{25,60,95,128}, :340:27, :342:43, :346:44, :350:26, :353:{24,29}, :355:36] assign s1_update_u_0_1 = _T_168 & (_GEN_35 ? _GEN_51 : decr_mask[0] & _update_was_taken_T_2) ? 2'h0 : new_u_1; // @[tage.scala:228:8, :258:31, :317:58, :319:92, :337:{25,60,95,128}, :340:27, :342:43, :346:44, :350:26, :353:{24,29}, :355:36] assign s1_update_u_0_2 = _T_168 & (_GEN_35 ? _GEN_52 : decr_mask[0] & _update_was_taken_T_4) ? 2'h0 : new_u_2; // @[tage.scala:228:8, :258:31, :317:58, :319:92, :337:{25,60,95,128}, :340:27, :342:43, :346:44, :350:26, :353:{24,29}, :355:36] assign s1_update_u_0_3 = _T_168 & (_GEN_35 ? _GEN_53 : decr_mask[0] & (&s1_update_bits_cfi_idx_bits)) ? 2'h0 : new_u_3; // @[tage.scala:228:8, :258:31, :317:58, :319:92, :337:{25,60,95,128}, :340:27, :342:43, :346:44, :350:26, :353:{24,29}, :355:36] assign s1_update_u_mask_1_0 = _T_168 ? (_GEN_35 ? _GEN_40 & _update_was_taken_T | _GEN_7 : decr_mask[1] & _update_was_taken_T | _GEN_7) : _GEN_7; // @[tage.scala:252:33, :253:35, :317:58, :319:92, :320:47, :323:37, :337:{25,60,95,128}, :340:27, :341:43, :345:44, :350:26, :353:{24,29}, :354:36] assign s1_update_u_mask_1_1 = _T_168 ? (_GEN_35 ? _GEN_40 & _update_was_taken_T_2 | _GEN_14 : decr_mask[1] & _update_was_taken_T_2 | _GEN_14) : _GEN_14; // @[tage.scala:252:33, :253:35, :317:58, :319:92, :320:47, :323:37, :337:{25,60,95,128}, :340:27, :341:43, :345:44, :350:26, :353:{24,29}, :354:36] assign s1_update_u_mask_1_2 = _T_168 ? (_GEN_35 ? _GEN_40 & _update_was_taken_T_4 | _GEN_21 : decr_mask[1] & _update_was_taken_T_4 | _GEN_21) : _GEN_21; // @[tage.scala:252:33, :253:35, :317:58, :319:92, :320:47, :323:37, :337:{25,60,95,128}, :340:27, :341:43, :345:44, :350:26, :353:{24,29}, :354:36] assign s1_update_u_mask_1_3 = _T_168 ? (_GEN_35 ? _GEN_40 & (&s1_update_bits_cfi_idx_bits) | _GEN_28 : decr_mask[1] & (&s1_update_bits_cfi_idx_bits) | _GEN_28) : _GEN_28; // @[tage.scala:252:33, :253:35, :317:58, :319:92, :320:47, :323:37, :337:{25,60,95,128}, :340:27, :341:43, :345:44, :350:26, :353:{24,29}, :354:36] assign s1_update_u_1_0 = _T_168 & (_GEN_35 ? _GEN_54 : decr_mask[1] & _update_was_taken_T) ? 
2'h0 : new_u; // @[tage.scala:228:8, :258:31, :317:58, :319:92, :337:{25,60,95,128}, :340:27, :342:43, :346:44, :350:26, :353:{24,29}, :355:36] assign s1_update_u_1_1 = _T_168 & (_GEN_35 ? _GEN_55 : decr_mask[1] & _update_was_taken_T_2) ? 2'h0 : new_u_1; // @[tage.scala:228:8, :258:31, :317:58, :319:92, :337:{25,60,95,128}, :340:27, :342:43, :346:44, :350:26, :353:{24,29}, :355:36] assign s1_update_u_1_2 = _T_168 & (_GEN_35 ? _GEN_56 : decr_mask[1] & _update_was_taken_T_4) ? 2'h0 : new_u_2; // @[tage.scala:228:8, :258:31, :317:58, :319:92, :337:{25,60,95,128}, :340:27, :342:43, :346:44, :350:26, :353:{24,29}, :355:36] assign s1_update_u_1_3 = _T_168 & (_GEN_35 ? _GEN_57 : decr_mask[1] & (&s1_update_bits_cfi_idx_bits)) ? 2'h0 : new_u_3; // @[tage.scala:228:8, :258:31, :317:58, :319:92, :337:{25,60,95,128}, :340:27, :342:43, :346:44, :350:26, :353:{24,29}, :355:36] assign s1_update_u_mask_2_0 = _T_168 ? (_GEN_35 ? _GEN_42 & _update_was_taken_T | _GEN_8 : decr_mask[2] & _update_was_taken_T | _GEN_8) : _GEN_8; // @[tage.scala:252:33, :253:35, :317:58, :319:92, :320:47, :323:37, :337:{25,60,95,128}, :340:27, :341:43, :345:44, :350:26, :353:{24,29}, :354:36] assign s1_update_u_mask_2_1 = _T_168 ? (_GEN_35 ? _GEN_42 & _update_was_taken_T_2 | _GEN_15 : decr_mask[2] & _update_was_taken_T_2 | _GEN_15) : _GEN_15; // @[tage.scala:252:33, :253:35, :317:58, :319:92, :320:47, :323:37, :337:{25,60,95,128}, :340:27, :341:43, :345:44, :350:26, :353:{24,29}, :354:36] assign s1_update_u_mask_2_2 = _T_168 ? (_GEN_35 ? _GEN_42 & _update_was_taken_T_4 | _GEN_22 : decr_mask[2] & _update_was_taken_T_4 | _GEN_22) : _GEN_22; // @[tage.scala:252:33, :253:35, :317:58, :319:92, :320:47, :323:37, :337:{25,60,95,128}, :340:27, :341:43, :345:44, :350:26, :353:{24,29}, :354:36] assign s1_update_u_mask_2_3 = _T_168 ? (_GEN_35 ? _GEN_42 & (&s1_update_bits_cfi_idx_bits) | _GEN_29 : decr_mask[2] & (&s1_update_bits_cfi_idx_bits) | _GEN_29) : _GEN_29; // @[tage.scala:252:33, :253:35, :317:58, :319:92, :320:47, :323:37, :337:{25,60,95,128}, :340:27, :341:43, :345:44, :350:26, :353:{24,29}, :354:36] assign s1_update_u_2_0 = _T_168 & (_GEN_35 ? _GEN_58 : decr_mask[2] & _update_was_taken_T) ? 2'h0 : new_u; // @[tage.scala:228:8, :258:31, :317:58, :319:92, :337:{25,60,95,128}, :340:27, :342:43, :346:44, :350:26, :353:{24,29}, :355:36] assign s1_update_u_2_1 = _T_168 & (_GEN_35 ? _GEN_59 : decr_mask[2] & _update_was_taken_T_2) ? 2'h0 : new_u_1; // @[tage.scala:228:8, :258:31, :317:58, :319:92, :337:{25,60,95,128}, :340:27, :342:43, :346:44, :350:26, :353:{24,29}, :355:36] assign s1_update_u_2_2 = _T_168 & (_GEN_35 ? _GEN_60 : decr_mask[2] & _update_was_taken_T_4) ? 2'h0 : new_u_2; // @[tage.scala:228:8, :258:31, :317:58, :319:92, :337:{25,60,95,128}, :340:27, :342:43, :346:44, :350:26, :353:{24,29}, :355:36] assign s1_update_u_2_3 = _T_168 & (_GEN_35 ? _GEN_61 : decr_mask[2] & (&s1_update_bits_cfi_idx_bits)) ? 2'h0 : new_u_3; // @[tage.scala:228:8, :258:31, :317:58, :319:92, :337:{25,60,95,128}, :340:27, :342:43, :346:44, :350:26, :353:{24,29}, :355:36] assign s1_update_u_mask_3_0 = _T_168 ? (_GEN_35 ? _GEN_44 & _update_was_taken_T | _GEN_9 : decr_mask[3] & _update_was_taken_T | _GEN_9) : _GEN_9; // @[tage.scala:252:33, :253:35, :317:58, :319:92, :320:47, :323:37, :337:{25,60,95,128}, :340:27, :341:43, :345:44, :350:26, :353:{24,29}, :354:36] assign s1_update_u_mask_3_1 = _T_168 ? (_GEN_35 ? 
_GEN_44 & _update_was_taken_T_2 | _GEN_16 : decr_mask[3] & _update_was_taken_T_2 | _GEN_16) : _GEN_16; // @[tage.scala:252:33, :253:35, :317:58, :319:92, :320:47, :323:37, :337:{25,60,95,128}, :340:27, :341:43, :345:44, :350:26, :353:{24,29}, :354:36] assign s1_update_u_mask_3_2 = _T_168 ? (_GEN_35 ? _GEN_44 & _update_was_taken_T_4 | _GEN_23 : decr_mask[3] & _update_was_taken_T_4 | _GEN_23) : _GEN_23; // @[tage.scala:252:33, :253:35, :317:58, :319:92, :320:47, :323:37, :337:{25,60,95,128}, :340:27, :341:43, :345:44, :350:26, :353:{24,29}, :354:36] assign s1_update_u_mask_3_3 = _T_168 ? (_GEN_35 ? _GEN_44 & (&s1_update_bits_cfi_idx_bits) | _GEN_30 : decr_mask[3] & (&s1_update_bits_cfi_idx_bits) | _GEN_30) : _GEN_30; // @[tage.scala:252:33, :253:35, :317:58, :319:92, :320:47, :323:37, :337:{25,60,95,128}, :340:27, :341:43, :345:44, :350:26, :353:{24,29}, :354:36] assign s1_update_u_3_0 = _T_168 & (_GEN_35 ? _GEN_62 : decr_mask[3] & _update_was_taken_T) ? 2'h0 : new_u; // @[tage.scala:228:8, :258:31, :317:58, :319:92, :337:{25,60,95,128}, :340:27, :342:43, :346:44, :350:26, :353:{24,29}, :355:36] assign s1_update_u_3_1 = _T_168 & (_GEN_35 ? _GEN_63 : decr_mask[3] & _update_was_taken_T_2) ? 2'h0 : new_u_1; // @[tage.scala:228:8, :258:31, :317:58, :319:92, :337:{25,60,95,128}, :340:27, :342:43, :346:44, :350:26, :353:{24,29}, :355:36] assign s1_update_u_3_2 = _T_168 & (_GEN_35 ? _GEN_64 : decr_mask[3] & _update_was_taken_T_4) ? 2'h0 : new_u_2; // @[tage.scala:228:8, :258:31, :317:58, :319:92, :337:{25,60,95,128}, :340:27, :342:43, :346:44, :350:26, :353:{24,29}, :355:36] assign s1_update_u_3_3 = _T_168 & (_GEN_35 ? _GEN_65 : decr_mask[3] & (&s1_update_bits_cfi_idx_bits)) ? 2'h0 : new_u_3; // @[tage.scala:228:8, :258:31, :317:58, :319:92, :337:{25,60,95,128}, :340:27, :342:43, :346:44, :350:26, :353:{24,29}, :355:36] assign s1_update_u_mask_4_0 = _T_168 ? (_GEN_35 ? _GEN_46 & _update_was_taken_T | _GEN_10 : decr_mask[4] & _update_was_taken_T | _GEN_10) : _GEN_10; // @[tage.scala:252:33, :253:35, :317:58, :319:92, :320:47, :323:37, :337:{25,60,95,128}, :340:27, :341:43, :345:44, :350:26, :353:{24,29}, :354:36] assign s1_update_u_mask_4_1 = _T_168 ? (_GEN_35 ? _GEN_46 & _update_was_taken_T_2 | _GEN_17 : decr_mask[4] & _update_was_taken_T_2 | _GEN_17) : _GEN_17; // @[tage.scala:252:33, :253:35, :317:58, :319:92, :320:47, :323:37, :337:{25,60,95,128}, :340:27, :341:43, :345:44, :350:26, :353:{24,29}, :354:36] assign s1_update_u_mask_4_2 = _T_168 ? (_GEN_35 ? _GEN_46 & _update_was_taken_T_4 | _GEN_24 : decr_mask[4] & _update_was_taken_T_4 | _GEN_24) : _GEN_24; // @[tage.scala:252:33, :253:35, :317:58, :319:92, :320:47, :323:37, :337:{25,60,95,128}, :340:27, :341:43, :345:44, :350:26, :353:{24,29}, :354:36] assign s1_update_u_mask_4_3 = _T_168 ? (_GEN_35 ? _GEN_46 & (&s1_update_bits_cfi_idx_bits) | _GEN_31 : decr_mask[4] & (&s1_update_bits_cfi_idx_bits) | _GEN_31) : _GEN_31; // @[tage.scala:252:33, :253:35, :317:58, :319:92, :320:47, :323:37, :337:{25,60,95,128}, :340:27, :341:43, :345:44, :350:26, :353:{24,29}, :354:36] assign s1_update_u_4_0 = _T_168 & (_GEN_35 ? _GEN_66 : decr_mask[4] & _update_was_taken_T) ? 2'h0 : new_u; // @[tage.scala:228:8, :258:31, :317:58, :319:92, :337:{25,60,95,128}, :340:27, :342:43, :346:44, :350:26, :353:{24,29}, :355:36] assign s1_update_u_4_1 = _T_168 & (_GEN_35 ? _GEN_67 : decr_mask[4] & _update_was_taken_T_2) ? 
2'h0 : new_u_1; // @[tage.scala:228:8, :258:31, :317:58, :319:92, :337:{25,60,95,128}, :340:27, :342:43, :346:44, :350:26, :353:{24,29}, :355:36] assign s1_update_u_4_2 = _T_168 & (_GEN_35 ? _GEN_68 : decr_mask[4] & _update_was_taken_T_4) ? 2'h0 : new_u_2; // @[tage.scala:228:8, :258:31, :317:58, :319:92, :337:{25,60,95,128}, :340:27, :342:43, :346:44, :350:26, :353:{24,29}, :355:36] assign s1_update_u_4_3 = _T_168 & (_GEN_35 ? _GEN_69 : decr_mask[4] & (&s1_update_bits_cfi_idx_bits)) ? 2'h0 : new_u_3; // @[tage.scala:228:8, :258:31, :317:58, :319:92, :337:{25,60,95,128}, :340:27, :342:43, :346:44, :350:26, :353:{24,29}, :355:36] assign s1_update_u_mask_5_0 = _T_168 ? (_GEN_35 ? _GEN_48 & _update_was_taken_T | _GEN_11 : decr_mask[5] & _update_was_taken_T | _GEN_11) : _GEN_11; // @[tage.scala:252:33, :253:35, :317:58, :319:92, :320:47, :323:37, :337:{25,60,95,128}, :340:27, :341:43, :345:44, :350:26, :353:{24,29}, :354:36] assign s1_update_u_mask_5_1 = _T_168 ? (_GEN_35 ? _GEN_48 & _update_was_taken_T_2 | _GEN_18 : decr_mask[5] & _update_was_taken_T_2 | _GEN_18) : _GEN_18; // @[tage.scala:252:33, :253:35, :317:58, :319:92, :320:47, :323:37, :337:{25,60,95,128}, :340:27, :341:43, :345:44, :350:26, :353:{24,29}, :354:36] assign s1_update_u_mask_5_2 = _T_168 ? (_GEN_35 ? _GEN_48 & _update_was_taken_T_4 | _GEN_25 : decr_mask[5] & _update_was_taken_T_4 | _GEN_25) : _GEN_25; // @[tage.scala:252:33, :253:35, :317:58, :319:92, :320:47, :323:37, :337:{25,60,95,128}, :340:27, :341:43, :345:44, :350:26, :353:{24,29}, :354:36] assign s1_update_u_mask_5_3 = _T_168 ? (_GEN_35 ? _GEN_48 & (&s1_update_bits_cfi_idx_bits) | _GEN_32 : decr_mask[5] & (&s1_update_bits_cfi_idx_bits) | _GEN_32) : _GEN_32; // @[tage.scala:252:33, :253:35, :317:58, :319:92, :320:47, :323:37, :337:{25,60,95,128}, :340:27, :341:43, :345:44, :350:26, :353:{24,29}, :354:36] assign s1_update_u_5_0 = _T_168 & (_GEN_35 ? _GEN_70 : decr_mask[5] & _update_was_taken_T) ? 2'h0 : new_u; // @[tage.scala:228:8, :258:31, :317:58, :319:92, :337:{25,60,95,128}, :340:27, :342:43, :346:44, :350:26, :353:{24,29}, :355:36] assign s1_update_u_5_1 = _T_168 & (_GEN_35 ? _GEN_71 : decr_mask[5] & _update_was_taken_T_2) ? 2'h0 : new_u_1; // @[tage.scala:228:8, :258:31, :317:58, :319:92, :337:{25,60,95,128}, :340:27, :342:43, :346:44, :350:26, :353:{24,29}, :355:36] assign s1_update_u_5_2 = _T_168 & (_GEN_35 ? _GEN_72 : decr_mask[5] & _update_was_taken_T_4) ? 2'h0 : new_u_2; // @[tage.scala:228:8, :258:31, :317:58, :319:92, :337:{25,60,95,128}, :340:27, :342:43, :346:44, :350:26, :353:{24,29}, :355:36] assign s1_update_u_5_3 = _T_168 & (_GEN_35 ? _GEN_73 : decr_mask[5] & (&s1_update_bits_cfi_idx_bits)) ? 
2'h0 : new_u_3; // @[tage.scala:228:8, :258:31, :317:58, :319:92, :337:{25,60,95,128}, :340:27, :342:43, :346:44, :350:26, :353:{24,29}, :355:36] reg tt_0_1_io_update_mask_0_REG; // @[tage.scala:365:48] reg tt_0_1_io_update_taken_0_REG; // @[tage.scala:366:48] reg tt_0_1_io_update_alloc_0_REG; // @[tage.scala:367:48] reg [2:0] tt_0_1_io_update_old_ctr_0_REG; // @[tage.scala:368:48] reg tt_0_1_io_update_u_mask_0_REG; // @[tage.scala:370:47] reg [1:0] tt_0_1_io_update_u_0_REG; // @[tage.scala:371:47] reg tt_0_1_io_update_mask_1_REG; // @[tage.scala:365:48] reg tt_0_1_io_update_taken_1_REG; // @[tage.scala:366:48] reg tt_0_1_io_update_alloc_1_REG; // @[tage.scala:367:48] reg [2:0] tt_0_1_io_update_old_ctr_1_REG; // @[tage.scala:368:48] reg tt_0_1_io_update_u_mask_1_REG; // @[tage.scala:370:47] reg [1:0] tt_0_1_io_update_u_1_REG; // @[tage.scala:371:47] reg tt_0_1_io_update_mask_2_REG; // @[tage.scala:365:48] reg tt_0_1_io_update_taken_2_REG; // @[tage.scala:366:48] reg tt_0_1_io_update_alloc_2_REG; // @[tage.scala:367:48] reg [2:0] tt_0_1_io_update_old_ctr_2_REG; // @[tage.scala:368:48] reg tt_0_1_io_update_u_mask_2_REG; // @[tage.scala:370:47] reg [1:0] tt_0_1_io_update_u_2_REG; // @[tage.scala:371:47] reg tt_0_1_io_update_mask_3_REG; // @[tage.scala:365:48] reg tt_0_1_io_update_taken_3_REG; // @[tage.scala:366:48] reg tt_0_1_io_update_alloc_3_REG; // @[tage.scala:367:48] reg [2:0] tt_0_1_io_update_old_ctr_3_REG; // @[tage.scala:368:48] reg tt_0_1_io_update_u_mask_3_REG; // @[tage.scala:370:47] reg [1:0] tt_0_1_io_update_u_3_REG; // @[tage.scala:371:47] reg [39:0] tt_0_1_io_update_pc_REG; // @[tage.scala:373:41] reg [63:0] tt_0_1_io_update_hist_REG; // @[tage.scala:374:41] reg tt_1_1_io_update_mask_0_REG; // @[tage.scala:365:48] reg tt_1_1_io_update_taken_0_REG; // @[tage.scala:366:48] reg tt_1_1_io_update_alloc_0_REG; // @[tage.scala:367:48] reg [2:0] tt_1_1_io_update_old_ctr_0_REG; // @[tage.scala:368:48] reg tt_1_1_io_update_u_mask_0_REG; // @[tage.scala:370:47] reg [1:0] tt_1_1_io_update_u_0_REG; // @[tage.scala:371:47] reg tt_1_1_io_update_mask_1_REG; // @[tage.scala:365:48] reg tt_1_1_io_update_taken_1_REG; // @[tage.scala:366:48] reg tt_1_1_io_update_alloc_1_REG; // @[tage.scala:367:48] reg [2:0] tt_1_1_io_update_old_ctr_1_REG; // @[tage.scala:368:48] reg tt_1_1_io_update_u_mask_1_REG; // @[tage.scala:370:47] reg [1:0] tt_1_1_io_update_u_1_REG; // @[tage.scala:371:47] reg tt_1_1_io_update_mask_2_REG; // @[tage.scala:365:48] reg tt_1_1_io_update_taken_2_REG; // @[tage.scala:366:48] reg tt_1_1_io_update_alloc_2_REG; // @[tage.scala:367:48] reg [2:0] tt_1_1_io_update_old_ctr_2_REG; // @[tage.scala:368:48] reg tt_1_1_io_update_u_mask_2_REG; // @[tage.scala:370:47] reg [1:0] tt_1_1_io_update_u_2_REG; // @[tage.scala:371:47] reg tt_1_1_io_update_mask_3_REG; // @[tage.scala:365:48] reg tt_1_1_io_update_taken_3_REG; // @[tage.scala:366:48] reg tt_1_1_io_update_alloc_3_REG; // @[tage.scala:367:48] reg [2:0] tt_1_1_io_update_old_ctr_3_REG; // @[tage.scala:368:48] reg tt_1_1_io_update_u_mask_3_REG; // @[tage.scala:370:47] reg [1:0] tt_1_1_io_update_u_3_REG; // @[tage.scala:371:47] reg [39:0] tt_1_1_io_update_pc_REG; // @[tage.scala:373:41] reg [63:0] tt_1_1_io_update_hist_REG; // @[tage.scala:374:41] reg tt_2_1_io_update_mask_0_REG; // @[tage.scala:365:48] reg tt_2_1_io_update_taken_0_REG; // @[tage.scala:366:48] reg tt_2_1_io_update_alloc_0_REG; // @[tage.scala:367:48] reg [2:0] tt_2_1_io_update_old_ctr_0_REG; // @[tage.scala:368:48] reg tt_2_1_io_update_u_mask_0_REG; // @[tage.scala:370:47] 
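// Note: the tt_*_1_io_update_*_REG registers declared in this region stage the per-table update signals (mask, taken, alloc, old_ctr, u_mask, u, pc, hist) for one cycle; they are loaded from the corresponding s1_update_* values in the clocked block further down and, judging by their names, drive the TAGE table update ports.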
reg [1:0] tt_2_1_io_update_u_0_REG; // @[tage.scala:371:47] reg tt_2_1_io_update_mask_1_REG; // @[tage.scala:365:48] reg tt_2_1_io_update_taken_1_REG; // @[tage.scala:366:48] reg tt_2_1_io_update_alloc_1_REG; // @[tage.scala:367:48] reg [2:0] tt_2_1_io_update_old_ctr_1_REG; // @[tage.scala:368:48] reg tt_2_1_io_update_u_mask_1_REG; // @[tage.scala:370:47] reg [1:0] tt_2_1_io_update_u_1_REG; // @[tage.scala:371:47] reg tt_2_1_io_update_mask_2_REG; // @[tage.scala:365:48] reg tt_2_1_io_update_taken_2_REG; // @[tage.scala:366:48] reg tt_2_1_io_update_alloc_2_REG; // @[tage.scala:367:48] reg [2:0] tt_2_1_io_update_old_ctr_2_REG; // @[tage.scala:368:48] reg tt_2_1_io_update_u_mask_2_REG; // @[tage.scala:370:47] reg [1:0] tt_2_1_io_update_u_2_REG; // @[tage.scala:371:47] reg tt_2_1_io_update_mask_3_REG; // @[tage.scala:365:48] reg tt_2_1_io_update_taken_3_REG; // @[tage.scala:366:48] reg tt_2_1_io_update_alloc_3_REG; // @[tage.scala:367:48] reg [2:0] tt_2_1_io_update_old_ctr_3_REG; // @[tage.scala:368:48] reg tt_2_1_io_update_u_mask_3_REG; // @[tage.scala:370:47] reg [1:0] tt_2_1_io_update_u_3_REG; // @[tage.scala:371:47] reg [39:0] tt_2_1_io_update_pc_REG; // @[tage.scala:373:41] reg [63:0] tt_2_1_io_update_hist_REG; // @[tage.scala:374:41] reg tt_3_1_io_update_mask_0_REG; // @[tage.scala:365:48] reg tt_3_1_io_update_taken_0_REG; // @[tage.scala:366:48] reg tt_3_1_io_update_alloc_0_REG; // @[tage.scala:367:48] reg [2:0] tt_3_1_io_update_old_ctr_0_REG; // @[tage.scala:368:48] reg tt_3_1_io_update_u_mask_0_REG; // @[tage.scala:370:47] reg [1:0] tt_3_1_io_update_u_0_REG; // @[tage.scala:371:47] reg tt_3_1_io_update_mask_1_REG; // @[tage.scala:365:48] reg tt_3_1_io_update_taken_1_REG; // @[tage.scala:366:48] reg tt_3_1_io_update_alloc_1_REG; // @[tage.scala:367:48] reg [2:0] tt_3_1_io_update_old_ctr_1_REG; // @[tage.scala:368:48] reg tt_3_1_io_update_u_mask_1_REG; // @[tage.scala:370:47] reg [1:0] tt_3_1_io_update_u_1_REG; // @[tage.scala:371:47] reg tt_3_1_io_update_mask_2_REG; // @[tage.scala:365:48] reg tt_3_1_io_update_taken_2_REG; // @[tage.scala:366:48] reg tt_3_1_io_update_alloc_2_REG; // @[tage.scala:367:48] reg [2:0] tt_3_1_io_update_old_ctr_2_REG; // @[tage.scala:368:48] reg tt_3_1_io_update_u_mask_2_REG; // @[tage.scala:370:47] reg [1:0] tt_3_1_io_update_u_2_REG; // @[tage.scala:371:47] reg tt_3_1_io_update_mask_3_REG; // @[tage.scala:365:48] reg tt_3_1_io_update_taken_3_REG; // @[tage.scala:366:48] reg tt_3_1_io_update_alloc_3_REG; // @[tage.scala:367:48] reg [2:0] tt_3_1_io_update_old_ctr_3_REG; // @[tage.scala:368:48] reg tt_3_1_io_update_u_mask_3_REG; // @[tage.scala:370:47] reg [1:0] tt_3_1_io_update_u_3_REG; // @[tage.scala:371:47] reg [39:0] tt_3_1_io_update_pc_REG; // @[tage.scala:373:41] reg [63:0] tt_3_1_io_update_hist_REG; // @[tage.scala:374:41] reg tt_4_1_io_update_mask_0_REG; // @[tage.scala:365:48] reg tt_4_1_io_update_taken_0_REG; // @[tage.scala:366:48] reg tt_4_1_io_update_alloc_0_REG; // @[tage.scala:367:48] reg [2:0] tt_4_1_io_update_old_ctr_0_REG; // @[tage.scala:368:48] reg tt_4_1_io_update_u_mask_0_REG; // @[tage.scala:370:47] reg [1:0] tt_4_1_io_update_u_0_REG; // @[tage.scala:371:47] reg tt_4_1_io_update_mask_1_REG; // @[tage.scala:365:48] reg tt_4_1_io_update_taken_1_REG; // @[tage.scala:366:48] reg tt_4_1_io_update_alloc_1_REG; // @[tage.scala:367:48] reg [2:0] tt_4_1_io_update_old_ctr_1_REG; // @[tage.scala:368:48] reg tt_4_1_io_update_u_mask_1_REG; // @[tage.scala:370:47] reg [1:0] tt_4_1_io_update_u_1_REG; // @[tage.scala:371:47] reg 
tt_4_1_io_update_mask_2_REG; // @[tage.scala:365:48] reg tt_4_1_io_update_taken_2_REG; // @[tage.scala:366:48] reg tt_4_1_io_update_alloc_2_REG; // @[tage.scala:367:48] reg [2:0] tt_4_1_io_update_old_ctr_2_REG; // @[tage.scala:368:48] reg tt_4_1_io_update_u_mask_2_REG; // @[tage.scala:370:47] reg [1:0] tt_4_1_io_update_u_2_REG; // @[tage.scala:371:47] reg tt_4_1_io_update_mask_3_REG; // @[tage.scala:365:48] reg tt_4_1_io_update_taken_3_REG; // @[tage.scala:366:48] reg tt_4_1_io_update_alloc_3_REG; // @[tage.scala:367:48] reg [2:0] tt_4_1_io_update_old_ctr_3_REG; // @[tage.scala:368:48] reg tt_4_1_io_update_u_mask_3_REG; // @[tage.scala:370:47] reg [1:0] tt_4_1_io_update_u_3_REG; // @[tage.scala:371:47] reg [39:0] tt_4_1_io_update_pc_REG; // @[tage.scala:373:41] reg [63:0] tt_4_1_io_update_hist_REG; // @[tage.scala:374:41] reg tt_5_1_io_update_mask_0_REG; // @[tage.scala:365:48] reg tt_5_1_io_update_taken_0_REG; // @[tage.scala:366:48] reg tt_5_1_io_update_alloc_0_REG; // @[tage.scala:367:48] reg [2:0] tt_5_1_io_update_old_ctr_0_REG; // @[tage.scala:368:48] reg tt_5_1_io_update_u_mask_0_REG; // @[tage.scala:370:47] reg [1:0] tt_5_1_io_update_u_0_REG; // @[tage.scala:371:47] reg tt_5_1_io_update_mask_1_REG; // @[tage.scala:365:48] reg tt_5_1_io_update_taken_1_REG; // @[tage.scala:366:48] reg tt_5_1_io_update_alloc_1_REG; // @[tage.scala:367:48] reg [2:0] tt_5_1_io_update_old_ctr_1_REG; // @[tage.scala:368:48] reg tt_5_1_io_update_u_mask_1_REG; // @[tage.scala:370:47] reg [1:0] tt_5_1_io_update_u_1_REG; // @[tage.scala:371:47] reg tt_5_1_io_update_mask_2_REG; // @[tage.scala:365:48] reg tt_5_1_io_update_taken_2_REG; // @[tage.scala:366:48] reg tt_5_1_io_update_alloc_2_REG; // @[tage.scala:367:48] reg [2:0] tt_5_1_io_update_old_ctr_2_REG; // @[tage.scala:368:48] reg tt_5_1_io_update_u_mask_2_REG; // @[tage.scala:370:47] reg [1:0] tt_5_1_io_update_u_2_REG; // @[tage.scala:371:47] reg tt_5_1_io_update_mask_3_REG; // @[tage.scala:365:48] reg tt_5_1_io_update_taken_3_REG; // @[tage.scala:366:48] reg tt_5_1_io_update_alloc_3_REG; // @[tage.scala:367:48] reg [2:0] tt_5_1_io_update_old_ctr_3_REG; // @[tage.scala:368:48] reg tt_5_1_io_update_u_mask_3_REG; // @[tage.scala:370:47] reg [1:0] tt_5_1_io_update_u_3_REG; // @[tage.scala:371:47] reg [39:0] tt_5_1_io_update_pc_REG; // @[tage.scala:373:41] reg [63:0] tt_5_1_io_update_hist_REG; // @[tage.scala:374:41] wire [7:0] io_f3_meta_lo = {_io_f3_meta_T_1, _io_f3_meta_T}; // @[tage.scala:379:25] wire [7:0] io_f3_meta_hi = {_io_f3_meta_T_3, _io_f3_meta_T_2}; // @[tage.scala:379:25] wire [15:0] _io_f3_meta_T_4 = {io_f3_meta_hi, io_f3_meta_lo}; // @[tage.scala:379:25] wire [11:0] _io_f3_meta_T_5 = {io_f3_meta_hi_1, io_f3_meta_lo_1}; // @[tage.scala:379:25] wire [7:0] _io_f3_meta_T_6 = {io_f3_meta_hi_2, io_f3_meta_lo_2}; // @[tage.scala:379:25] wire [3:0] _io_f3_meta_T_7 = {io_f3_meta_hi_3, io_f3_meta_lo_3}; // @[tage.scala:379:25] wire [7:0] io_f3_meta_lo_4 = {_io_f3_meta_T_9, _io_f3_meta_T_8}; // @[tage.scala:379:25] wire [7:0] io_f3_meta_hi_4 = {_io_f3_meta_T_11, _io_f3_meta_T_10}; // @[tage.scala:379:25] wire [15:0] _io_f3_meta_T_12 = {io_f3_meta_hi_4, io_f3_meta_lo_4}; // @[tage.scala:379:25] wire [27:0] io_f3_meta_lo_5 = {_io_f3_meta_T_5, _io_f3_meta_T_4}; // @[tage.scala:379:25] wire [19:0] io_f3_meta_hi_hi = {_io_f3_meta_T_12, _io_f3_meta_T_7}; // @[tage.scala:379:25] wire [27:0] io_f3_meta_hi_5 = {io_f3_meta_hi_hi, _io_f3_meta_T_6}; // @[tage.scala:379:25] wire [55:0] _io_f3_meta_T_13 = {io_f3_meta_hi_5, io_f3_meta_lo_5}; // @[tage.scala:379:25] 
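// The io_f3_meta_lo*/io_f3_meta_hi* wires above flatten the per-bank prediction metadata (provider valid/index, alt_differs, provider_u, provider_ctr, allocate) into the 56-bit _io_f3_meta_T_13, which is zero-extended to the 120-bit metadata width when assigned to io_f3_meta_0 below.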
assign io_f3_meta_0 = {64'h0, _io_f3_meta_T_13}; // @[tage.scala:209:7, :379:{14,25}] wire _T_21 = f2_resps_0_0_valid | f2_resps_1_0_valid; // @[tage.scala:245:25, :274:33] wire _T_26 = _T_21 | f2_resps_2_0_valid; // @[tage.scala:245:25, :274:33] wire [1:0] _T_28 = f2_resps_2_0_valid ? 2'h2 : {1'h0, f2_resps_1_0_valid}; // @[tage.scala:245:25, :277:24] wire _T_31 = _T_26 | f2_resps_3_0_valid; // @[tage.scala:245:25, :274:33] wire [1:0] _T_33 = f2_resps_3_0_valid ? 2'h3 : _T_28; // @[tage.scala:245:25, :277:24] wire _T_36 = _T_31 | f2_resps_4_0_valid; // @[tage.scala:245:25, :274:33] wire [2:0] _T_38 = f2_resps_4_0_valid ? 3'h4 : {1'h0, _T_33}; // @[tage.scala:245:25, :277:24] wire [2:0] _T_42 = f2_resps_5_0_valid ? _T_38 : {1'h0, f2_resps_4_0_valid ? _T_33 : f2_resps_3_0_valid ? _T_28 : {1'h0, f2_resps_2_0_valid & f2_resps_1_0_valid}}; // @[tage.scala:245:25, :276:28, :277:24] wire [2:0] _T_43 = f2_resps_5_0_valid ? 3'h5 : _T_38; // @[tage.scala:245:25, :277:24] wire [7:0][2:0] _GEN_76 = {{f2_resps_0_0_bits_ctr}, {f2_resps_0_0_bits_ctr}, {f2_resps_5_0_bits_ctr}, {f2_resps_4_0_bits_ctr}, {f2_resps_3_0_bits_ctr}, {f2_resps_2_0_bits_ctr}, {f2_resps_1_0_bits_ctr}, {f2_resps_0_0_bits_ctr}}; // @[tage.scala:245:25, :284:23] wire [7:0][1:0] _GEN_77 = {{f2_resps_0_0_bits_u}, {f2_resps_0_0_bits_u}, {f2_resps_5_0_bits_u}, {f2_resps_4_0_bits_u}, {f2_resps_3_0_bits_u}, {f2_resps_2_0_bits_u}, {f2_resps_1_0_bits_u}, {f2_resps_0_0_bits_u}}; // @[tage.scala:245:25, :284:23] wire _T_58 = f2_resps_0_1_valid | f2_resps_1_1_valid; // @[tage.scala:245:25, :274:33] wire _T_63 = _T_58 | f2_resps_2_1_valid; // @[tage.scala:245:25, :274:33] wire [1:0] _T_65 = f2_resps_2_1_valid ? 2'h2 : {1'h0, f2_resps_1_1_valid}; // @[tage.scala:245:25, :277:24] wire _T_68 = _T_63 | f2_resps_3_1_valid; // @[tage.scala:245:25, :274:33] wire [1:0] _T_70 = f2_resps_3_1_valid ? 2'h3 : _T_65; // @[tage.scala:245:25, :277:24] wire _T_73 = _T_68 | f2_resps_4_1_valid; // @[tage.scala:245:25, :274:33] wire [2:0] _T_75 = f2_resps_4_1_valid ? 3'h4 : {1'h0, _T_70}; // @[tage.scala:245:25, :277:24] wire [2:0] _T_79 = f2_resps_5_1_valid ? _T_75 : {1'h0, f2_resps_4_1_valid ? _T_70 : f2_resps_3_1_valid ? _T_65 : {1'h0, f2_resps_2_1_valid & f2_resps_1_1_valid}}; // @[tage.scala:245:25, :276:28, :277:24] wire [2:0] _T_80 = f2_resps_5_1_valid ? 3'h5 : _T_75; // @[tage.scala:245:25, :277:24] wire [7:0][2:0] _GEN_78 = {{f2_resps_0_1_bits_ctr}, {f2_resps_0_1_bits_ctr}, {f2_resps_5_1_bits_ctr}, {f2_resps_4_1_bits_ctr}, {f2_resps_3_1_bits_ctr}, {f2_resps_2_1_bits_ctr}, {f2_resps_1_1_bits_ctr}, {f2_resps_0_1_bits_ctr}}; // @[tage.scala:245:25, :284:23] wire [7:0][1:0] _GEN_79 = {{f2_resps_0_1_bits_u}, {f2_resps_0_1_bits_u}, {f2_resps_5_1_bits_u}, {f2_resps_4_1_bits_u}, {f2_resps_3_1_bits_u}, {f2_resps_2_1_bits_u}, {f2_resps_1_1_bits_u}, {f2_resps_0_1_bits_u}}; // @[tage.scala:245:25, :284:23] wire _T_95 = f2_resps_0_2_valid | f2_resps_1_2_valid; // @[tage.scala:245:25, :274:33] wire _T_100 = _T_95 | f2_resps_2_2_valid; // @[tage.scala:245:25, :274:33] wire [1:0] _T_102 = f2_resps_2_2_valid ? 2'h2 : {1'h0, f2_resps_1_2_valid}; // @[tage.scala:245:25, :277:24] wire _T_105 = _T_100 | f2_resps_3_2_valid; // @[tage.scala:245:25, :274:33] wire [1:0] _T_107 = f2_resps_3_2_valid ? 2'h3 : _T_102; // @[tage.scala:245:25, :277:24] wire _T_110 = _T_105 | f2_resps_4_2_valid; // @[tage.scala:245:25, :274:33] wire [2:0] _T_112 = f2_resps_4_2_valid ? 3'h4 : {1'h0, _T_107}; // @[tage.scala:245:25, :277:24] wire [2:0] _T_116 = f2_resps_5_2_valid ? 
_T_112 : {1'h0, f2_resps_4_2_valid ? _T_107 : f2_resps_3_2_valid ? _T_102 : {1'h0, f2_resps_2_2_valid & f2_resps_1_2_valid}}; // @[tage.scala:245:25, :276:28, :277:24] wire [2:0] _T_117 = f2_resps_5_2_valid ? 3'h5 : _T_112; // @[tage.scala:245:25, :277:24] wire [7:0][2:0] _GEN_80 = {{f2_resps_0_2_bits_ctr}, {f2_resps_0_2_bits_ctr}, {f2_resps_5_2_bits_ctr}, {f2_resps_4_2_bits_ctr}, {f2_resps_3_2_bits_ctr}, {f2_resps_2_2_bits_ctr}, {f2_resps_1_2_bits_ctr}, {f2_resps_0_2_bits_ctr}}; // @[tage.scala:245:25, :284:23] wire [7:0][1:0] _GEN_81 = {{f2_resps_0_2_bits_u}, {f2_resps_0_2_bits_u}, {f2_resps_5_2_bits_u}, {f2_resps_4_2_bits_u}, {f2_resps_3_2_bits_u}, {f2_resps_2_2_bits_u}, {f2_resps_1_2_bits_u}, {f2_resps_0_2_bits_u}}; // @[tage.scala:245:25, :284:23] wire _T_132 = f2_resps_0_3_valid | f2_resps_1_3_valid; // @[tage.scala:245:25, :274:33] wire _T_137 = _T_132 | f2_resps_2_3_valid; // @[tage.scala:245:25, :274:33] wire [1:0] _T_139 = f2_resps_2_3_valid ? 2'h2 : {1'h0, f2_resps_1_3_valid}; // @[tage.scala:245:25, :277:24] wire _T_142 = _T_137 | f2_resps_3_3_valid; // @[tage.scala:245:25, :274:33] wire [1:0] _T_144 = f2_resps_3_3_valid ? 2'h3 : _T_139; // @[tage.scala:245:25, :277:24] wire _T_147 = _T_142 | f2_resps_4_3_valid; // @[tage.scala:245:25, :274:33] wire [2:0] _T_149 = f2_resps_4_3_valid ? 3'h4 : {1'h0, _T_144}; // @[tage.scala:245:25, :277:24] wire [2:0] _T_153 = f2_resps_5_3_valid ? _T_149 : {1'h0, f2_resps_4_3_valid ? _T_144 : f2_resps_3_3_valid ? _T_139 : {1'h0, f2_resps_2_3_valid & f2_resps_1_3_valid}}; // @[tage.scala:245:25, :276:28, :277:24] wire [2:0] _T_154 = f2_resps_5_3_valid ? 3'h5 : _T_149; // @[tage.scala:245:25, :277:24] wire [7:0][2:0] _GEN_82 = {{f2_resps_0_3_bits_ctr}, {f2_resps_0_3_bits_ctr}, {f2_resps_5_3_bits_ctr}, {f2_resps_4_3_bits_ctr}, {f2_resps_3_3_bits_ctr}, {f2_resps_2_3_bits_ctr}, {f2_resps_1_3_bits_ctr}, {f2_resps_0_3_bits_ctr}}; // @[tage.scala:245:25, :284:23] wire [7:0][1:0] _GEN_83 = {{f2_resps_0_3_bits_u}, {f2_resps_0_3_bits_u}, {f2_resps_5_3_bits_u}, {f2_resps_4_3_bits_u}, {f2_resps_3_3_bits_u}, {f2_resps_2_3_bits_u}, {f2_resps_1_3_bits_u}, {f2_resps_0_3_bits_u}}; // @[tage.scala:245:25, :284:23] always @(posedge clock) begin // @[tage.scala:209:7] s1_idx <= s0_idx; // @[frontend.scala:149:35] s2_idx <= s1_idx; // @[predictor.scala:163:29, :164:29] s3_idx <= s2_idx; // @[predictor.scala:164:29, :165:29] s1_valid <= io_f0_valid_0; // @[tage.scala:209:7] s2_valid <= s1_valid; // @[predictor.scala:168:25, :169:25] s3_valid <= s2_valid; // @[predictor.scala:169:25, :170:25] s1_mask <= io_f0_mask_0; // @[tage.scala:209:7] s2_mask <= s1_mask; // @[predictor.scala:173:24, :174:24] s3_mask <= s2_mask; // @[predictor.scala:174:24, :175:24] s1_pc <= s0_pc; // @[frontend.scala:147:31] s2_pc <= s1_pc; // @[predictor.scala:178:22, :179:22] s1_update_valid <= io_update_valid_0; // @[tage.scala:209:7] s1_update_bits_is_mispredict_update <= io_update_bits_is_mispredict_update_0; // @[tage.scala:209:7] s1_update_bits_is_repair_update <= io_update_bits_is_repair_update_0; // @[tage.scala:209:7] s1_update_bits_btb_mispredicts <= io_update_bits_btb_mispredicts_0; // @[tage.scala:209:7] s1_update_bits_pc <= _s1_update_bits_pc_T_2; // @[frontend.scala:147:31] s1_update_bits_br_mask <= io_update_bits_br_mask_0; // @[tage.scala:209:7] s1_update_bits_cfi_idx_valid <= io_update_bits_cfi_idx_valid_0; // @[tage.scala:209:7] s1_update_bits_cfi_idx_bits <= io_update_bits_cfi_idx_bits_0; // @[tage.scala:209:7] s1_update_bits_cfi_taken <= io_update_bits_cfi_taken_0; // 
@[tage.scala:209:7] s1_update_bits_cfi_mispredicted <= io_update_bits_cfi_mispredicted_0; // @[tage.scala:209:7] s1_update_bits_cfi_is_br <= io_update_bits_cfi_is_br_0; // @[tage.scala:209:7] s1_update_bits_cfi_is_jal <= io_update_bits_cfi_is_jal_0; // @[tage.scala:209:7] s1_update_bits_cfi_is_jalr <= io_update_bits_cfi_is_jalr_0; // @[tage.scala:209:7] s1_update_bits_ghist <= io_update_bits_ghist_0; // @[tage.scala:209:7] s1_update_bits_lhist <= io_update_bits_lhist_0; // @[tage.scala:209:7] s1_update_bits_target <= io_update_bits_target_0; // @[tage.scala:209:7] s1_update_bits_meta <= io_update_bits_meta_0; // @[tage.scala:209:7] s1_update_idx <= s0_update_idx; // @[frontend.scala:149:35] s1_update_valid_0 <= io_update_valid_0; // @[tage.scala:209:7] t_io_f1_req_valid_REG <= io_f0_valid_0; // @[tage.scala:209:7, :236:35] t_io_f1_req_pc_REG <= _t_io_f1_req_pc_T_2; // @[frontend.scala:147:31] t_io_f1_req_valid_REG_1 <= io_f0_valid_0; // @[tage.scala:209:7, :236:35] t_io_f1_req_pc_REG_1 <= _t_io_f1_req_pc_T_5; // @[frontend.scala:147:31] t_io_f1_req_valid_REG_2 <= io_f0_valid_0; // @[tage.scala:209:7, :236:35] t_io_f1_req_pc_REG_2 <= _t_io_f1_req_pc_T_8; // @[frontend.scala:147:31] t_io_f1_req_valid_REG_3 <= io_f0_valid_0; // @[tage.scala:209:7, :236:35] t_io_f1_req_pc_REG_3 <= _t_io_f1_req_pc_T_11; // @[frontend.scala:147:31] t_io_f1_req_valid_REG_4 <= io_f0_valid_0; // @[tage.scala:209:7, :236:35] t_io_f1_req_pc_REG_4 <= _t_io_f1_req_pc_T_14; // @[frontend.scala:147:31] t_io_f1_req_valid_REG_5 <= io_f0_valid_0; // @[tage.scala:209:7, :236:35] t_io_f1_req_pc_REG_5 <= _t_io_f1_req_pc_T_17; // @[frontend.scala:147:31] f3_resps_0_0_valid <= f2_resps_0_0_valid; // @[tage.scala:245:25, :246:25] f3_resps_0_0_bits_ctr <= f2_resps_0_0_bits_ctr; // @[tage.scala:245:25, :246:25] f3_resps_0_0_bits_u <= f2_resps_0_0_bits_u; // @[tage.scala:245:25, :246:25] f3_resps_0_1_valid <= f2_resps_0_1_valid; // @[tage.scala:245:25, :246:25] f3_resps_0_1_bits_ctr <= f2_resps_0_1_bits_ctr; // @[tage.scala:245:25, :246:25] f3_resps_0_1_bits_u <= f2_resps_0_1_bits_u; // @[tage.scala:245:25, :246:25] f3_resps_0_2_valid <= f2_resps_0_2_valid; // @[tage.scala:245:25, :246:25] f3_resps_0_2_bits_ctr <= f2_resps_0_2_bits_ctr; // @[tage.scala:245:25, :246:25] f3_resps_0_2_bits_u <= f2_resps_0_2_bits_u; // @[tage.scala:245:25, :246:25] f3_resps_0_3_valid <= f2_resps_0_3_valid; // @[tage.scala:245:25, :246:25] f3_resps_0_3_bits_ctr <= f2_resps_0_3_bits_ctr; // @[tage.scala:245:25, :246:25] f3_resps_0_3_bits_u <= f2_resps_0_3_bits_u; // @[tage.scala:245:25, :246:25] f3_resps_1_0_valid <= f2_resps_1_0_valid; // @[tage.scala:245:25, :246:25] f3_resps_1_0_bits_ctr <= f2_resps_1_0_bits_ctr; // @[tage.scala:245:25, :246:25] f3_resps_1_0_bits_u <= f2_resps_1_0_bits_u; // @[tage.scala:245:25, :246:25] f3_resps_1_1_valid <= f2_resps_1_1_valid; // @[tage.scala:245:25, :246:25] f3_resps_1_1_bits_ctr <= f2_resps_1_1_bits_ctr; // @[tage.scala:245:25, :246:25] f3_resps_1_1_bits_u <= f2_resps_1_1_bits_u; // @[tage.scala:245:25, :246:25] f3_resps_1_2_valid <= f2_resps_1_2_valid; // @[tage.scala:245:25, :246:25] f3_resps_1_2_bits_ctr <= f2_resps_1_2_bits_ctr; // @[tage.scala:245:25, :246:25] f3_resps_1_2_bits_u <= f2_resps_1_2_bits_u; // @[tage.scala:245:25, :246:25] f3_resps_1_3_valid <= f2_resps_1_3_valid; // @[tage.scala:245:25, :246:25] f3_resps_1_3_bits_ctr <= f2_resps_1_3_bits_ctr; // @[tage.scala:245:25, :246:25] f3_resps_1_3_bits_u <= f2_resps_1_3_bits_u; // @[tage.scala:245:25, :246:25] f3_resps_2_0_valid <= f2_resps_2_0_valid; // 
@[tage.scala:245:25, :246:25] f3_resps_2_0_bits_ctr <= f2_resps_2_0_bits_ctr; // @[tage.scala:245:25, :246:25] f3_resps_2_0_bits_u <= f2_resps_2_0_bits_u; // @[tage.scala:245:25, :246:25] f3_resps_2_1_valid <= f2_resps_2_1_valid; // @[tage.scala:245:25, :246:25] f3_resps_2_1_bits_ctr <= f2_resps_2_1_bits_ctr; // @[tage.scala:245:25, :246:25] f3_resps_2_1_bits_u <= f2_resps_2_1_bits_u; // @[tage.scala:245:25, :246:25] f3_resps_2_2_valid <= f2_resps_2_2_valid; // @[tage.scala:245:25, :246:25] f3_resps_2_2_bits_ctr <= f2_resps_2_2_bits_ctr; // @[tage.scala:245:25, :246:25] f3_resps_2_2_bits_u <= f2_resps_2_2_bits_u; // @[tage.scala:245:25, :246:25] f3_resps_2_3_valid <= f2_resps_2_3_valid; // @[tage.scala:245:25, :246:25] f3_resps_2_3_bits_ctr <= f2_resps_2_3_bits_ctr; // @[tage.scala:245:25, :246:25] f3_resps_2_3_bits_u <= f2_resps_2_3_bits_u; // @[tage.scala:245:25, :246:25] f3_resps_3_0_valid <= f2_resps_3_0_valid; // @[tage.scala:245:25, :246:25] f3_resps_3_0_bits_ctr <= f2_resps_3_0_bits_ctr; // @[tage.scala:245:25, :246:25] f3_resps_3_0_bits_u <= f2_resps_3_0_bits_u; // @[tage.scala:245:25, :246:25] f3_resps_3_1_valid <= f2_resps_3_1_valid; // @[tage.scala:245:25, :246:25] f3_resps_3_1_bits_ctr <= f2_resps_3_1_bits_ctr; // @[tage.scala:245:25, :246:25] f3_resps_3_1_bits_u <= f2_resps_3_1_bits_u; // @[tage.scala:245:25, :246:25] f3_resps_3_2_valid <= f2_resps_3_2_valid; // @[tage.scala:245:25, :246:25] f3_resps_3_2_bits_ctr <= f2_resps_3_2_bits_ctr; // @[tage.scala:245:25, :246:25] f3_resps_3_2_bits_u <= f2_resps_3_2_bits_u; // @[tage.scala:245:25, :246:25] f3_resps_3_3_valid <= f2_resps_3_3_valid; // @[tage.scala:245:25, :246:25] f3_resps_3_3_bits_ctr <= f2_resps_3_3_bits_ctr; // @[tage.scala:245:25, :246:25] f3_resps_3_3_bits_u <= f2_resps_3_3_bits_u; // @[tage.scala:245:25, :246:25] f3_resps_4_0_valid <= f2_resps_4_0_valid; // @[tage.scala:245:25, :246:25] f3_resps_4_0_bits_ctr <= f2_resps_4_0_bits_ctr; // @[tage.scala:245:25, :246:25] f3_resps_4_0_bits_u <= f2_resps_4_0_bits_u; // @[tage.scala:245:25, :246:25] f3_resps_4_1_valid <= f2_resps_4_1_valid; // @[tage.scala:245:25, :246:25] f3_resps_4_1_bits_ctr <= f2_resps_4_1_bits_ctr; // @[tage.scala:245:25, :246:25] f3_resps_4_1_bits_u <= f2_resps_4_1_bits_u; // @[tage.scala:245:25, :246:25] f3_resps_4_2_valid <= f2_resps_4_2_valid; // @[tage.scala:245:25, :246:25] f3_resps_4_2_bits_ctr <= f2_resps_4_2_bits_ctr; // @[tage.scala:245:25, :246:25] f3_resps_4_2_bits_u <= f2_resps_4_2_bits_u; // @[tage.scala:245:25, :246:25] f3_resps_4_3_valid <= f2_resps_4_3_valid; // @[tage.scala:245:25, :246:25] f3_resps_4_3_bits_ctr <= f2_resps_4_3_bits_ctr; // @[tage.scala:245:25, :246:25] f3_resps_4_3_bits_u <= f2_resps_4_3_bits_u; // @[tage.scala:245:25, :246:25] f3_resps_5_0_valid <= f2_resps_5_0_valid; // @[tage.scala:245:25, :246:25] f3_resps_5_0_bits_ctr <= f2_resps_5_0_bits_ctr; // @[tage.scala:245:25, :246:25] f3_resps_5_0_bits_u <= f2_resps_5_0_bits_u; // @[tage.scala:245:25, :246:25] f3_resps_5_1_valid <= f2_resps_5_1_valid; // @[tage.scala:245:25, :246:25] f3_resps_5_1_bits_ctr <= f2_resps_5_1_bits_ctr; // @[tage.scala:245:25, :246:25] f3_resps_5_1_bits_u <= f2_resps_5_1_bits_u; // @[tage.scala:245:25, :246:25] f3_resps_5_2_valid <= f2_resps_5_2_valid; // @[tage.scala:245:25, :246:25] f3_resps_5_2_bits_ctr <= f2_resps_5_2_bits_ctr; // @[tage.scala:245:25, :246:25] f3_resps_5_2_bits_u <= f2_resps_5_2_bits_u; // @[tage.scala:245:25, :246:25] f3_resps_5_3_valid <= f2_resps_5_3_valid; // @[tage.scala:245:25, :246:25] f3_resps_5_3_bits_ctr <= 
f2_resps_5_3_bits_ctr; // @[tage.scala:245:25, :246:25] f3_resps_5_3_bits_u <= f2_resps_5_3_bits_u; // @[tage.scala:245:25, :246:25] s3_provided <= _T_36 | f2_resps_5_0_valid; // @[tage.scala:245:25, :274:33, :279:30] s3_provider <= _T_43; // @[tage.scala:277:24, :280:30] s3_alt_provided <= f2_resps_0_0_valid & f2_resps_1_0_valid | _T_21 & f2_resps_2_0_valid | _T_26 & f2_resps_3_0_valid | _T_31 & f2_resps_4_0_valid | _T_36 & f2_resps_5_0_valid; // @[tage.scala:245:25, :273:{41,57}, :274:33, :281:34] s3_alt_provider <= _T_42; // @[tage.scala:276:28, :282:34] prov_ctr <= _GEN_76[_T_43]; // @[tage.scala:277:24, :284:23] prov_u <= _GEN_77[_T_43]; // @[tage.scala:277:24, :284:23] alt_ctr <= _GEN_76[_T_42]; // @[tage.scala:276:28, :284:23, :285:23] alt_u <= _GEN_77[_T_42]; // @[tage.scala:276:28, :284:23, :285:23] s3_provided_1 <= _T_73 | f2_resps_5_1_valid; // @[tage.scala:245:25, :274:33, :279:30] s3_provider_1 <= _T_80; // @[tage.scala:277:24, :280:30] s3_alt_provided_1 <= f2_resps_0_1_valid & f2_resps_1_1_valid | _T_58 & f2_resps_2_1_valid | _T_63 & f2_resps_3_1_valid | _T_68 & f2_resps_4_1_valid | _T_73 & f2_resps_5_1_valid; // @[tage.scala:245:25, :273:{41,57}, :274:33, :281:34] s3_alt_provider_1 <= _T_79; // @[tage.scala:276:28, :282:34] prov_1_ctr <= _GEN_78[_T_80]; // @[tage.scala:277:24, :284:23] prov_1_u <= _GEN_79[_T_80]; // @[tage.scala:277:24, :284:23] alt_1_ctr <= _GEN_78[_T_79]; // @[tage.scala:276:28, :284:23, :285:23] alt_1_u <= _GEN_79[_T_79]; // @[tage.scala:276:28, :284:23, :285:23] s3_provided_2 <= _T_110 | f2_resps_5_2_valid; // @[tage.scala:245:25, :274:33, :279:30] s3_provider_2 <= _T_117; // @[tage.scala:277:24, :280:30] s3_alt_provided_2 <= f2_resps_0_2_valid & f2_resps_1_2_valid | _T_95 & f2_resps_2_2_valid | _T_100 & f2_resps_3_2_valid | _T_105 & f2_resps_4_2_valid | _T_110 & f2_resps_5_2_valid; // @[tage.scala:245:25, :273:{41,57}, :274:33, :281:34] s3_alt_provider_2 <= _T_116; // @[tage.scala:276:28, :282:34] prov_2_ctr <= _GEN_80[_T_117]; // @[tage.scala:277:24, :284:23] prov_2_u <= _GEN_81[_T_117]; // @[tage.scala:277:24, :284:23] alt_2_ctr <= _GEN_80[_T_116]; // @[tage.scala:276:28, :284:23, :285:23] alt_2_u <= _GEN_81[_T_116]; // @[tage.scala:276:28, :284:23, :285:23] s3_provided_3 <= _T_147 | f2_resps_5_3_valid; // @[tage.scala:245:25, :274:33, :279:30] s3_provider_3 <= _T_154; // @[tage.scala:277:24, :280:30] s3_alt_provided_3 <= f2_resps_0_3_valid & f2_resps_1_3_valid | _T_132 & f2_resps_2_3_valid | _T_137 & f2_resps_3_3_valid | _T_142 & f2_resps_4_3_valid | _T_147 & f2_resps_5_3_valid; // @[tage.scala:245:25, :273:{41,57}, :274:33, :281:34] s3_alt_provider_3 <= _T_153; // @[tage.scala:276:28, :282:34] prov_3_ctr <= _GEN_82[_T_154]; // @[tage.scala:277:24, :284:23] prov_3_u <= _GEN_83[_T_154]; // @[tage.scala:277:24, :284:23] alt_3_ctr <= _GEN_82[_T_153]; // @[tage.scala:276:28, :284:23, :285:23] alt_3_u <= _GEN_83[_T_153]; // @[tage.scala:276:28, :284:23, :285:23] tt_0_1_io_update_mask_0_REG <= s1_update_mask_0_0; // @[tage.scala:252:33, :365:48] tt_0_1_io_update_taken_0_REG <= s1_update_taken_0_0; // @[tage.scala:255:31, :366:48] tt_0_1_io_update_alloc_0_REG <= s1_update_alloc_0_0; // @[tage.scala:257:31, :367:48] tt_0_1_io_update_old_ctr_0_REG <= s1_update_old_ctr_0_0; // @[tage.scala:256:31, :368:48] tt_0_1_io_update_u_mask_0_REG <= s1_update_u_mask_0_0; // @[tage.scala:253:35, :370:47] tt_0_1_io_update_u_0_REG <= s1_update_u_0_0; // @[tage.scala:258:31, :371:47] tt_0_1_io_update_mask_1_REG <= s1_update_mask_0_1; // @[tage.scala:252:33, :365:48] 
tt_0_1_io_update_taken_1_REG <= s1_update_taken_0_1; // @[tage.scala:255:31, :366:48] tt_0_1_io_update_alloc_1_REG <= s1_update_alloc_0_1; // @[tage.scala:257:31, :367:48] tt_0_1_io_update_old_ctr_1_REG <= s1_update_old_ctr_0_1; // @[tage.scala:256:31, :368:48] tt_0_1_io_update_u_mask_1_REG <= s1_update_u_mask_0_1; // @[tage.scala:253:35, :370:47] tt_0_1_io_update_u_1_REG <= s1_update_u_0_1; // @[tage.scala:258:31, :371:47] tt_0_1_io_update_mask_2_REG <= s1_update_mask_0_2; // @[tage.scala:252:33, :365:48] tt_0_1_io_update_taken_2_REG <= s1_update_taken_0_2; // @[tage.scala:255:31, :366:48] tt_0_1_io_update_alloc_2_REG <= s1_update_alloc_0_2; // @[tage.scala:257:31, :367:48] tt_0_1_io_update_old_ctr_2_REG <= s1_update_old_ctr_0_2; // @[tage.scala:256:31, :368:48] tt_0_1_io_update_u_mask_2_REG <= s1_update_u_mask_0_2; // @[tage.scala:253:35, :370:47] tt_0_1_io_update_u_2_REG <= s1_update_u_0_2; // @[tage.scala:258:31, :371:47] tt_0_1_io_update_mask_3_REG <= s1_update_mask_0_3; // @[tage.scala:252:33, :365:48] tt_0_1_io_update_taken_3_REG <= s1_update_taken_0_3; // @[tage.scala:255:31, :366:48] tt_0_1_io_update_alloc_3_REG <= s1_update_alloc_0_3; // @[tage.scala:257:31, :367:48] tt_0_1_io_update_old_ctr_3_REG <= s1_update_old_ctr_0_3; // @[tage.scala:256:31, :368:48] tt_0_1_io_update_u_mask_3_REG <= s1_update_u_mask_0_3; // @[tage.scala:253:35, :370:47] tt_0_1_io_update_u_3_REG <= s1_update_u_0_3; // @[tage.scala:258:31, :371:47] tt_0_1_io_update_pc_REG <= s1_update_bits_pc; // @[tage.scala:373:41] tt_0_1_io_update_hist_REG <= s1_update_bits_ghist; // @[tage.scala:374:41] tt_1_1_io_update_mask_0_REG <= s1_update_mask_1_0; // @[tage.scala:252:33, :365:48] tt_1_1_io_update_taken_0_REG <= s1_update_taken_1_0; // @[tage.scala:255:31, :366:48] tt_1_1_io_update_alloc_0_REG <= s1_update_alloc_1_0; // @[tage.scala:257:31, :367:48] tt_1_1_io_update_old_ctr_0_REG <= s1_update_old_ctr_1_0; // @[tage.scala:256:31, :368:48] tt_1_1_io_update_u_mask_0_REG <= s1_update_u_mask_1_0; // @[tage.scala:253:35, :370:47] tt_1_1_io_update_u_0_REG <= s1_update_u_1_0; // @[tage.scala:258:31, :371:47] tt_1_1_io_update_mask_1_REG <= s1_update_mask_1_1; // @[tage.scala:252:33, :365:48] tt_1_1_io_update_taken_1_REG <= s1_update_taken_1_1; // @[tage.scala:255:31, :366:48] tt_1_1_io_update_alloc_1_REG <= s1_update_alloc_1_1; // @[tage.scala:257:31, :367:48] tt_1_1_io_update_old_ctr_1_REG <= s1_update_old_ctr_1_1; // @[tage.scala:256:31, :368:48] tt_1_1_io_update_u_mask_1_REG <= s1_update_u_mask_1_1; // @[tage.scala:253:35, :370:47] tt_1_1_io_update_u_1_REG <= s1_update_u_1_1; // @[tage.scala:258:31, :371:47] tt_1_1_io_update_mask_2_REG <= s1_update_mask_1_2; // @[tage.scala:252:33, :365:48] tt_1_1_io_update_taken_2_REG <= s1_update_taken_1_2; // @[tage.scala:255:31, :366:48] tt_1_1_io_update_alloc_2_REG <= s1_update_alloc_1_2; // @[tage.scala:257:31, :367:48] tt_1_1_io_update_old_ctr_2_REG <= s1_update_old_ctr_1_2; // @[tage.scala:256:31, :368:48] tt_1_1_io_update_u_mask_2_REG <= s1_update_u_mask_1_2; // @[tage.scala:253:35, :370:47] tt_1_1_io_update_u_2_REG <= s1_update_u_1_2; // @[tage.scala:258:31, :371:47] tt_1_1_io_update_mask_3_REG <= s1_update_mask_1_3; // @[tage.scala:252:33, :365:48] tt_1_1_io_update_taken_3_REG <= s1_update_taken_1_3; // @[tage.scala:255:31, :366:48] tt_1_1_io_update_alloc_3_REG <= s1_update_alloc_1_3; // @[tage.scala:257:31, :367:48] tt_1_1_io_update_old_ctr_3_REG <= s1_update_old_ctr_1_3; // @[tage.scala:256:31, :368:48] tt_1_1_io_update_u_mask_3_REG <= s1_update_u_mask_1_3; // 
@[tage.scala:253:35, :370:47] tt_1_1_io_update_u_3_REG <= s1_update_u_1_3; // @[tage.scala:258:31, :371:47] tt_1_1_io_update_pc_REG <= s1_update_bits_pc; // @[tage.scala:373:41] tt_1_1_io_update_hist_REG <= s1_update_bits_ghist; // @[tage.scala:374:41] tt_2_1_io_update_mask_0_REG <= s1_update_mask_2_0; // @[tage.scala:252:33, :365:48] tt_2_1_io_update_taken_0_REG <= s1_update_taken_2_0; // @[tage.scala:255:31, :366:48] tt_2_1_io_update_alloc_0_REG <= s1_update_alloc_2_0; // @[tage.scala:257:31, :367:48] tt_2_1_io_update_old_ctr_0_REG <= s1_update_old_ctr_2_0; // @[tage.scala:256:31, :368:48] tt_2_1_io_update_u_mask_0_REG <= s1_update_u_mask_2_0; // @[tage.scala:253:35, :370:47] tt_2_1_io_update_u_0_REG <= s1_update_u_2_0; // @[tage.scala:258:31, :371:47] tt_2_1_io_update_mask_1_REG <= s1_update_mask_2_1; // @[tage.scala:252:33, :365:48] tt_2_1_io_update_taken_1_REG <= s1_update_taken_2_1; // @[tage.scala:255:31, :366:48] tt_2_1_io_update_alloc_1_REG <= s1_update_alloc_2_1; // @[tage.scala:257:31, :367:48] tt_2_1_io_update_old_ctr_1_REG <= s1_update_old_ctr_2_1; // @[tage.scala:256:31, :368:48] tt_2_1_io_update_u_mask_1_REG <= s1_update_u_mask_2_1; // @[tage.scala:253:35, :370:47] tt_2_1_io_update_u_1_REG <= s1_update_u_2_1; // @[tage.scala:258:31, :371:47] tt_2_1_io_update_mask_2_REG <= s1_update_mask_2_2; // @[tage.scala:252:33, :365:48] tt_2_1_io_update_taken_2_REG <= s1_update_taken_2_2; // @[tage.scala:255:31, :366:48] tt_2_1_io_update_alloc_2_REG <= s1_update_alloc_2_2; // @[tage.scala:257:31, :367:48] tt_2_1_io_update_old_ctr_2_REG <= s1_update_old_ctr_2_2; // @[tage.scala:256:31, :368:48] tt_2_1_io_update_u_mask_2_REG <= s1_update_u_mask_2_2; // @[tage.scala:253:35, :370:47] tt_2_1_io_update_u_2_REG <= s1_update_u_2_2; // @[tage.scala:258:31, :371:47] tt_2_1_io_update_mask_3_REG <= s1_update_mask_2_3; // @[tage.scala:252:33, :365:48] tt_2_1_io_update_taken_3_REG <= s1_update_taken_2_3; // @[tage.scala:255:31, :366:48] tt_2_1_io_update_alloc_3_REG <= s1_update_alloc_2_3; // @[tage.scala:257:31, :367:48] tt_2_1_io_update_old_ctr_3_REG <= s1_update_old_ctr_2_3; // @[tage.scala:256:31, :368:48] tt_2_1_io_update_u_mask_3_REG <= s1_update_u_mask_2_3; // @[tage.scala:253:35, :370:47] tt_2_1_io_update_u_3_REG <= s1_update_u_2_3; // @[tage.scala:258:31, :371:47] tt_2_1_io_update_pc_REG <= s1_update_bits_pc; // @[tage.scala:373:41] tt_2_1_io_update_hist_REG <= s1_update_bits_ghist; // @[tage.scala:374:41] tt_3_1_io_update_mask_0_REG <= s1_update_mask_3_0; // @[tage.scala:252:33, :365:48] tt_3_1_io_update_taken_0_REG <= s1_update_taken_3_0; // @[tage.scala:255:31, :366:48] tt_3_1_io_update_alloc_0_REG <= s1_update_alloc_3_0; // @[tage.scala:257:31, :367:48] tt_3_1_io_update_old_ctr_0_REG <= s1_update_old_ctr_3_0; // @[tage.scala:256:31, :368:48] tt_3_1_io_update_u_mask_0_REG <= s1_update_u_mask_3_0; // @[tage.scala:253:35, :370:47] tt_3_1_io_update_u_0_REG <= s1_update_u_3_0; // @[tage.scala:258:31, :371:47] tt_3_1_io_update_mask_1_REG <= s1_update_mask_3_1; // @[tage.scala:252:33, :365:48] tt_3_1_io_update_taken_1_REG <= s1_update_taken_3_1; // @[tage.scala:255:31, :366:48] tt_3_1_io_update_alloc_1_REG <= s1_update_alloc_3_1; // @[tage.scala:257:31, :367:48] tt_3_1_io_update_old_ctr_1_REG <= s1_update_old_ctr_3_1; // @[tage.scala:256:31, :368:48] tt_3_1_io_update_u_mask_1_REG <= s1_update_u_mask_3_1; // @[tage.scala:253:35, :370:47] tt_3_1_io_update_u_1_REG <= s1_update_u_3_1; // @[tage.scala:258:31, :371:47] tt_3_1_io_update_mask_2_REG <= s1_update_mask_3_2; // @[tage.scala:252:33, :365:48] 
tt_3_1_io_update_taken_2_REG <= s1_update_taken_3_2; // @[tage.scala:255:31, :366:48] tt_3_1_io_update_alloc_2_REG <= s1_update_alloc_3_2; // @[tage.scala:257:31, :367:48] tt_3_1_io_update_old_ctr_2_REG <= s1_update_old_ctr_3_2; // @[tage.scala:256:31, :368:48] tt_3_1_io_update_u_mask_2_REG <= s1_update_u_mask_3_2; // @[tage.scala:253:35, :370:47] tt_3_1_io_update_u_2_REG <= s1_update_u_3_2; // @[tage.scala:258:31, :371:47] tt_3_1_io_update_mask_3_REG <= s1_update_mask_3_3; // @[tage.scala:252:33, :365:48] tt_3_1_io_update_taken_3_REG <= s1_update_taken_3_3; // @[tage.scala:255:31, :366:48] tt_3_1_io_update_alloc_3_REG <= s1_update_alloc_3_3; // @[tage.scala:257:31, :367:48] tt_3_1_io_update_old_ctr_3_REG <= s1_update_old_ctr_3_3; // @[tage.scala:256:31, :368:48] tt_3_1_io_update_u_mask_3_REG <= s1_update_u_mask_3_3; // @[tage.scala:253:35, :370:47] tt_3_1_io_update_u_3_REG <= s1_update_u_3_3; // @[tage.scala:258:31, :371:47] tt_3_1_io_update_pc_REG <= s1_update_bits_pc; // @[tage.scala:373:41] tt_3_1_io_update_hist_REG <= s1_update_bits_ghist; // @[tage.scala:374:41] tt_4_1_io_update_mask_0_REG <= s1_update_mask_4_0; // @[tage.scala:252:33, :365:48] tt_4_1_io_update_taken_0_REG <= s1_update_taken_4_0; // @[tage.scala:255:31, :366:48] tt_4_1_io_update_alloc_0_REG <= s1_update_alloc_4_0; // @[tage.scala:257:31, :367:48] tt_4_1_io_update_old_ctr_0_REG <= s1_update_old_ctr_4_0; // @[tage.scala:256:31, :368:48] tt_4_1_io_update_u_mask_0_REG <= s1_update_u_mask_4_0; // @[tage.scala:253:35, :370:47] tt_4_1_io_update_u_0_REG <= s1_update_u_4_0; // @[tage.scala:258:31, :371:47] tt_4_1_io_update_mask_1_REG <= s1_update_mask_4_1; // @[tage.scala:252:33, :365:48] tt_4_1_io_update_taken_1_REG <= s1_update_taken_4_1; // @[tage.scala:255:31, :366:48] tt_4_1_io_update_alloc_1_REG <= s1_update_alloc_4_1; // @[tage.scala:257:31, :367:48] tt_4_1_io_update_old_ctr_1_REG <= s1_update_old_ctr_4_1; // @[tage.scala:256:31, :368:48] tt_4_1_io_update_u_mask_1_REG <= s1_update_u_mask_4_1; // @[tage.scala:253:35, :370:47] tt_4_1_io_update_u_1_REG <= s1_update_u_4_1; // @[tage.scala:258:31, :371:47] tt_4_1_io_update_mask_2_REG <= s1_update_mask_4_2; // @[tage.scala:252:33, :365:48] tt_4_1_io_update_taken_2_REG <= s1_update_taken_4_2; // @[tage.scala:255:31, :366:48] tt_4_1_io_update_alloc_2_REG <= s1_update_alloc_4_2; // @[tage.scala:257:31, :367:48] tt_4_1_io_update_old_ctr_2_REG <= s1_update_old_ctr_4_2; // @[tage.scala:256:31, :368:48] tt_4_1_io_update_u_mask_2_REG <= s1_update_u_mask_4_2; // @[tage.scala:253:35, :370:47] tt_4_1_io_update_u_2_REG <= s1_update_u_4_2; // @[tage.scala:258:31, :371:47] tt_4_1_io_update_mask_3_REG <= s1_update_mask_4_3; // @[tage.scala:252:33, :365:48] tt_4_1_io_update_taken_3_REG <= s1_update_taken_4_3; // @[tage.scala:255:31, :366:48] tt_4_1_io_update_alloc_3_REG <= s1_update_alloc_4_3; // @[tage.scala:257:31, :367:48] tt_4_1_io_update_old_ctr_3_REG <= s1_update_old_ctr_4_3; // @[tage.scala:256:31, :368:48] tt_4_1_io_update_u_mask_3_REG <= s1_update_u_mask_4_3; // @[tage.scala:253:35, :370:47] tt_4_1_io_update_u_3_REG <= s1_update_u_4_3; // @[tage.scala:258:31, :371:47] tt_4_1_io_update_pc_REG <= s1_update_bits_pc; // @[tage.scala:373:41] tt_4_1_io_update_hist_REG <= s1_update_bits_ghist; // @[tage.scala:374:41] tt_5_1_io_update_mask_0_REG <= s1_update_mask_5_0; // @[tage.scala:252:33, :365:48] tt_5_1_io_update_taken_0_REG <= s1_update_taken_5_0; // @[tage.scala:255:31, :366:48] tt_5_1_io_update_alloc_0_REG <= s1_update_alloc_5_0; // @[tage.scala:257:31, :367:48] 
tt_5_1_io_update_old_ctr_0_REG <= s1_update_old_ctr_5_0; // @[tage.scala:256:31, :368:48] tt_5_1_io_update_u_mask_0_REG <= s1_update_u_mask_5_0; // @[tage.scala:253:35, :370:47] tt_5_1_io_update_u_0_REG <= s1_update_u_5_0; // @[tage.scala:258:31, :371:47] tt_5_1_io_update_mask_1_REG <= s1_update_mask_5_1; // @[tage.scala:252:33, :365:48] tt_5_1_io_update_taken_1_REG <= s1_update_taken_5_1; // @[tage.scala:255:31, :366:48] tt_5_1_io_update_alloc_1_REG <= s1_update_alloc_5_1; // @[tage.scala:257:31, :367:48] tt_5_1_io_update_old_ctr_1_REG <= s1_update_old_ctr_5_1; // @[tage.scala:256:31, :368:48] tt_5_1_io_update_u_mask_1_REG <= s1_update_u_mask_5_1; // @[tage.scala:253:35, :370:47] tt_5_1_io_update_u_1_REG <= s1_update_u_5_1; // @[tage.scala:258:31, :371:47] tt_5_1_io_update_mask_2_REG <= s1_update_mask_5_2; // @[tage.scala:252:33, :365:48] tt_5_1_io_update_taken_2_REG <= s1_update_taken_5_2; // @[tage.scala:255:31, :366:48] tt_5_1_io_update_alloc_2_REG <= s1_update_alloc_5_2; // @[tage.scala:257:31, :367:48] tt_5_1_io_update_old_ctr_2_REG <= s1_update_old_ctr_5_2; // @[tage.scala:256:31, :368:48] tt_5_1_io_update_u_mask_2_REG <= s1_update_u_mask_5_2; // @[tage.scala:253:35, :370:47] tt_5_1_io_update_u_2_REG <= s1_update_u_5_2; // @[tage.scala:258:31, :371:47] tt_5_1_io_update_mask_3_REG <= s1_update_mask_5_3; // @[tage.scala:252:33, :365:48] tt_5_1_io_update_taken_3_REG <= s1_update_taken_5_3; // @[tage.scala:255:31, :366:48] tt_5_1_io_update_alloc_3_REG <= s1_update_alloc_5_3; // @[tage.scala:257:31, :367:48] tt_5_1_io_update_old_ctr_3_REG <= s1_update_old_ctr_5_3; // @[tage.scala:256:31, :368:48] tt_5_1_io_update_u_mask_3_REG <= s1_update_u_mask_5_3; // @[tage.scala:253:35, :370:47] tt_5_1_io_update_u_3_REG <= s1_update_u_5_3; // @[tage.scala:258:31, :371:47] tt_5_1_io_update_pc_REG <= s1_update_bits_pc; // @[tage.scala:373:41] tt_5_1_io_update_hist_REG <= s1_update_bits_ghist; // @[tage.scala:374:41] always @(posedge) TageTable tt_0_1 ( // @[tage.scala:235:21] .clock (clock), .reset (reset), .io_f1_req_valid (t_io_f1_req_valid_REG), // @[tage.scala:236:35] .io_f1_req_pc (t_io_f1_req_pc_REG), // @[tage.scala:237:35] .io_f1_req_ghist (io_f1_ghist_0), // @[tage.scala:209:7] .io_f2_resp_0_valid (f2_resps_0_0_valid), .io_f2_resp_0_bits_ctr (f2_resps_0_0_bits_ctr), .io_f2_resp_0_bits_u (f2_resps_0_0_bits_u), .io_f2_resp_1_valid (f2_resps_0_1_valid), .io_f2_resp_1_bits_ctr (f2_resps_0_1_bits_ctr), .io_f2_resp_1_bits_u (f2_resps_0_1_bits_u), .io_f2_resp_2_valid (f2_resps_0_2_valid), .io_f2_resp_2_bits_ctr (f2_resps_0_2_bits_ctr), .io_f2_resp_2_bits_u (f2_resps_0_2_bits_u), .io_f2_resp_3_valid (f2_resps_0_3_valid), .io_f2_resp_3_bits_ctr (f2_resps_0_3_bits_ctr), .io_f2_resp_3_bits_u (f2_resps_0_3_bits_u), .io_update_mask_0 (tt_0_1_io_update_mask_0_REG), // @[tage.scala:365:48] .io_update_mask_1 (tt_0_1_io_update_mask_1_REG), // @[tage.scala:365:48] .io_update_mask_2 (tt_0_1_io_update_mask_2_REG), // @[tage.scala:365:48] .io_update_mask_3 (tt_0_1_io_update_mask_3_REG), // @[tage.scala:365:48] .io_update_taken_0 (tt_0_1_io_update_taken_0_REG), // @[tage.scala:366:48] .io_update_taken_1 (tt_0_1_io_update_taken_1_REG), // @[tage.scala:366:48] .io_update_taken_2 (tt_0_1_io_update_taken_2_REG), // @[tage.scala:366:48] .io_update_taken_3 (tt_0_1_io_update_taken_3_REG), // @[tage.scala:366:48] .io_update_alloc_0 (tt_0_1_io_update_alloc_0_REG), // @[tage.scala:367:48] .io_update_alloc_1 (tt_0_1_io_update_alloc_1_REG), // @[tage.scala:367:48] .io_update_alloc_2 (tt_0_1_io_update_alloc_2_REG), // 
@[tage.scala:367:48] .io_update_alloc_3 (tt_0_1_io_update_alloc_3_REG), // @[tage.scala:367:48] .io_update_old_ctr_0 (tt_0_1_io_update_old_ctr_0_REG), // @[tage.scala:368:48] .io_update_old_ctr_1 (tt_0_1_io_update_old_ctr_1_REG), // @[tage.scala:368:48] .io_update_old_ctr_2 (tt_0_1_io_update_old_ctr_2_REG), // @[tage.scala:368:48] .io_update_old_ctr_3 (tt_0_1_io_update_old_ctr_3_REG), // @[tage.scala:368:48] .io_update_pc (tt_0_1_io_update_pc_REG), // @[tage.scala:373:41] .io_update_hist (tt_0_1_io_update_hist_REG), // @[tage.scala:374:41] .io_update_u_mask_0 (tt_0_1_io_update_u_mask_0_REG), // @[tage.scala:370:47] .io_update_u_mask_1 (tt_0_1_io_update_u_mask_1_REG), // @[tage.scala:370:47] .io_update_u_mask_2 (tt_0_1_io_update_u_mask_2_REG), // @[tage.scala:370:47] .io_update_u_mask_3 (tt_0_1_io_update_u_mask_3_REG), // @[tage.scala:370:47] .io_update_u_0 (tt_0_1_io_update_u_0_REG), // @[tage.scala:371:47] .io_update_u_1 (tt_0_1_io_update_u_1_REG), // @[tage.scala:371:47] .io_update_u_2 (tt_0_1_io_update_u_2_REG), // @[tage.scala:371:47] .io_update_u_3 (tt_0_1_io_update_u_3_REG) // @[tage.scala:371:47] ); // @[tage.scala:235:21] TageTable_1 tt_1_1 ( // @[tage.scala:235:21] .clock (clock), .reset (reset), .io_f1_req_valid (t_io_f1_req_valid_REG_1), // @[tage.scala:236:35] .io_f1_req_pc (t_io_f1_req_pc_REG_1), // @[tage.scala:237:35] .io_f1_req_ghist (io_f1_ghist_0), // @[tage.scala:209:7] .io_f2_resp_0_valid (f2_resps_1_0_valid), .io_f2_resp_0_bits_ctr (f2_resps_1_0_bits_ctr), .io_f2_resp_0_bits_u (f2_resps_1_0_bits_u), .io_f2_resp_1_valid (f2_resps_1_1_valid), .io_f2_resp_1_bits_ctr (f2_resps_1_1_bits_ctr), .io_f2_resp_1_bits_u (f2_resps_1_1_bits_u), .io_f2_resp_2_valid (f2_resps_1_2_valid), .io_f2_resp_2_bits_ctr (f2_resps_1_2_bits_ctr), .io_f2_resp_2_bits_u (f2_resps_1_2_bits_u), .io_f2_resp_3_valid (f2_resps_1_3_valid), .io_f2_resp_3_bits_ctr (f2_resps_1_3_bits_ctr), .io_f2_resp_3_bits_u (f2_resps_1_3_bits_u), .io_update_mask_0 (tt_1_1_io_update_mask_0_REG), // @[tage.scala:365:48] .io_update_mask_1 (tt_1_1_io_update_mask_1_REG), // @[tage.scala:365:48] .io_update_mask_2 (tt_1_1_io_update_mask_2_REG), // @[tage.scala:365:48] .io_update_mask_3 (tt_1_1_io_update_mask_3_REG), // @[tage.scala:365:48] .io_update_taken_0 (tt_1_1_io_update_taken_0_REG), // @[tage.scala:366:48] .io_update_taken_1 (tt_1_1_io_update_taken_1_REG), // @[tage.scala:366:48] .io_update_taken_2 (tt_1_1_io_update_taken_2_REG), // @[tage.scala:366:48] .io_update_taken_3 (tt_1_1_io_update_taken_3_REG), // @[tage.scala:366:48] .io_update_alloc_0 (tt_1_1_io_update_alloc_0_REG), // @[tage.scala:367:48] .io_update_alloc_1 (tt_1_1_io_update_alloc_1_REG), // @[tage.scala:367:48] .io_update_alloc_2 (tt_1_1_io_update_alloc_2_REG), // @[tage.scala:367:48] .io_update_alloc_3 (tt_1_1_io_update_alloc_3_REG), // @[tage.scala:367:48] .io_update_old_ctr_0 (tt_1_1_io_update_old_ctr_0_REG), // @[tage.scala:368:48] .io_update_old_ctr_1 (tt_1_1_io_update_old_ctr_1_REG), // @[tage.scala:368:48] .io_update_old_ctr_2 (tt_1_1_io_update_old_ctr_2_REG), // @[tage.scala:368:48] .io_update_old_ctr_3 (tt_1_1_io_update_old_ctr_3_REG), // @[tage.scala:368:48] .io_update_pc (tt_1_1_io_update_pc_REG), // @[tage.scala:373:41] .io_update_hist (tt_1_1_io_update_hist_REG), // @[tage.scala:374:41] .io_update_u_mask_0 (tt_1_1_io_update_u_mask_0_REG), // @[tage.scala:370:47] .io_update_u_mask_1 (tt_1_1_io_update_u_mask_1_REG), // @[tage.scala:370:47] .io_update_u_mask_2 (tt_1_1_io_update_u_mask_2_REG), // @[tage.scala:370:47] .io_update_u_mask_3 
(tt_1_1_io_update_u_mask_3_REG), // @[tage.scala:370:47] .io_update_u_0 (tt_1_1_io_update_u_0_REG), // @[tage.scala:371:47] .io_update_u_1 (tt_1_1_io_update_u_1_REG), // @[tage.scala:371:47] .io_update_u_2 (tt_1_1_io_update_u_2_REG), // @[tage.scala:371:47] .io_update_u_3 (tt_1_1_io_update_u_3_REG) // @[tage.scala:371:47] ); // @[tage.scala:235:21] TageTable_2 tt_2_1 ( // @[tage.scala:235:21] .clock (clock), .reset (reset), .io_f1_req_valid (t_io_f1_req_valid_REG_2), // @[tage.scala:236:35] .io_f1_req_pc (t_io_f1_req_pc_REG_2), // @[tage.scala:237:35] .io_f1_req_ghist (io_f1_ghist_0), // @[tage.scala:209:7] .io_f2_resp_0_valid (f2_resps_2_0_valid), .io_f2_resp_0_bits_ctr (f2_resps_2_0_bits_ctr), .io_f2_resp_0_bits_u (f2_resps_2_0_bits_u), .io_f2_resp_1_valid (f2_resps_2_1_valid), .io_f2_resp_1_bits_ctr (f2_resps_2_1_bits_ctr), .io_f2_resp_1_bits_u (f2_resps_2_1_bits_u), .io_f2_resp_2_valid (f2_resps_2_2_valid), .io_f2_resp_2_bits_ctr (f2_resps_2_2_bits_ctr), .io_f2_resp_2_bits_u (f2_resps_2_2_bits_u), .io_f2_resp_3_valid (f2_resps_2_3_valid), .io_f2_resp_3_bits_ctr (f2_resps_2_3_bits_ctr), .io_f2_resp_3_bits_u (f2_resps_2_3_bits_u), .io_update_mask_0 (tt_2_1_io_update_mask_0_REG), // @[tage.scala:365:48] .io_update_mask_1 (tt_2_1_io_update_mask_1_REG), // @[tage.scala:365:48] .io_update_mask_2 (tt_2_1_io_update_mask_2_REG), // @[tage.scala:365:48] .io_update_mask_3 (tt_2_1_io_update_mask_3_REG), // @[tage.scala:365:48] .io_update_taken_0 (tt_2_1_io_update_taken_0_REG), // @[tage.scala:366:48] .io_update_taken_1 (tt_2_1_io_update_taken_1_REG), // @[tage.scala:366:48] .io_update_taken_2 (tt_2_1_io_update_taken_2_REG), // @[tage.scala:366:48] .io_update_taken_3 (tt_2_1_io_update_taken_3_REG), // @[tage.scala:366:48] .io_update_alloc_0 (tt_2_1_io_update_alloc_0_REG), // @[tage.scala:367:48] .io_update_alloc_1 (tt_2_1_io_update_alloc_1_REG), // @[tage.scala:367:48] .io_update_alloc_2 (tt_2_1_io_update_alloc_2_REG), // @[tage.scala:367:48] .io_update_alloc_3 (tt_2_1_io_update_alloc_3_REG), // @[tage.scala:367:48] .io_update_old_ctr_0 (tt_2_1_io_update_old_ctr_0_REG), // @[tage.scala:368:48] .io_update_old_ctr_1 (tt_2_1_io_update_old_ctr_1_REG), // @[tage.scala:368:48] .io_update_old_ctr_2 (tt_2_1_io_update_old_ctr_2_REG), // @[tage.scala:368:48] .io_update_old_ctr_3 (tt_2_1_io_update_old_ctr_3_REG), // @[tage.scala:368:48] .io_update_pc (tt_2_1_io_update_pc_REG), // @[tage.scala:373:41] .io_update_hist (tt_2_1_io_update_hist_REG), // @[tage.scala:374:41] .io_update_u_mask_0 (tt_2_1_io_update_u_mask_0_REG), // @[tage.scala:370:47] .io_update_u_mask_1 (tt_2_1_io_update_u_mask_1_REG), // @[tage.scala:370:47] .io_update_u_mask_2 (tt_2_1_io_update_u_mask_2_REG), // @[tage.scala:370:47] .io_update_u_mask_3 (tt_2_1_io_update_u_mask_3_REG), // @[tage.scala:370:47] .io_update_u_0 (tt_2_1_io_update_u_0_REG), // @[tage.scala:371:47] .io_update_u_1 (tt_2_1_io_update_u_1_REG), // @[tage.scala:371:47] .io_update_u_2 (tt_2_1_io_update_u_2_REG), // @[tage.scala:371:47] .io_update_u_3 (tt_2_1_io_update_u_3_REG) // @[tage.scala:371:47] ); // @[tage.scala:235:21] TageTable_3 tt_3_1 ( // @[tage.scala:235:21] .clock (clock), .reset (reset), .io_f1_req_valid (t_io_f1_req_valid_REG_3), // @[tage.scala:236:35] .io_f1_req_pc (t_io_f1_req_pc_REG_3), // @[tage.scala:237:35] .io_f1_req_ghist (io_f1_ghist_0), // @[tage.scala:209:7] .io_f2_resp_0_valid (f2_resps_3_0_valid), .io_f2_resp_0_bits_ctr (f2_resps_3_0_bits_ctr), .io_f2_resp_0_bits_u (f2_resps_3_0_bits_u), .io_f2_resp_1_valid (f2_resps_3_1_valid), 
.io_f2_resp_1_bits_ctr (f2_resps_3_1_bits_ctr), .io_f2_resp_1_bits_u (f2_resps_3_1_bits_u), .io_f2_resp_2_valid (f2_resps_3_2_valid), .io_f2_resp_2_bits_ctr (f2_resps_3_2_bits_ctr), .io_f2_resp_2_bits_u (f2_resps_3_2_bits_u), .io_f2_resp_3_valid (f2_resps_3_3_valid), .io_f2_resp_3_bits_ctr (f2_resps_3_3_bits_ctr), .io_f2_resp_3_bits_u (f2_resps_3_3_bits_u), .io_update_mask_0 (tt_3_1_io_update_mask_0_REG), // @[tage.scala:365:48] .io_update_mask_1 (tt_3_1_io_update_mask_1_REG), // @[tage.scala:365:48] .io_update_mask_2 (tt_3_1_io_update_mask_2_REG), // @[tage.scala:365:48] .io_update_mask_3 (tt_3_1_io_update_mask_3_REG), // @[tage.scala:365:48] .io_update_taken_0 (tt_3_1_io_update_taken_0_REG), // @[tage.scala:366:48] .io_update_taken_1 (tt_3_1_io_update_taken_1_REG), // @[tage.scala:366:48] .io_update_taken_2 (tt_3_1_io_update_taken_2_REG), // @[tage.scala:366:48] .io_update_taken_3 (tt_3_1_io_update_taken_3_REG), // @[tage.scala:366:48] .io_update_alloc_0 (tt_3_1_io_update_alloc_0_REG), // @[tage.scala:367:48] .io_update_alloc_1 (tt_3_1_io_update_alloc_1_REG), // @[tage.scala:367:48] .io_update_alloc_2 (tt_3_1_io_update_alloc_2_REG), // @[tage.scala:367:48] .io_update_alloc_3 (tt_3_1_io_update_alloc_3_REG), // @[tage.scala:367:48] .io_update_old_ctr_0 (tt_3_1_io_update_old_ctr_0_REG), // @[tage.scala:368:48] .io_update_old_ctr_1 (tt_3_1_io_update_old_ctr_1_REG), // @[tage.scala:368:48] .io_update_old_ctr_2 (tt_3_1_io_update_old_ctr_2_REG), // @[tage.scala:368:48] .io_update_old_ctr_3 (tt_3_1_io_update_old_ctr_3_REG), // @[tage.scala:368:48] .io_update_pc (tt_3_1_io_update_pc_REG), // @[tage.scala:373:41] .io_update_hist (tt_3_1_io_update_hist_REG), // @[tage.scala:374:41] .io_update_u_mask_0 (tt_3_1_io_update_u_mask_0_REG), // @[tage.scala:370:47] .io_update_u_mask_1 (tt_3_1_io_update_u_mask_1_REG), // @[tage.scala:370:47] .io_update_u_mask_2 (tt_3_1_io_update_u_mask_2_REG), // @[tage.scala:370:47] .io_update_u_mask_3 (tt_3_1_io_update_u_mask_3_REG), // @[tage.scala:370:47] .io_update_u_0 (tt_3_1_io_update_u_0_REG), // @[tage.scala:371:47] .io_update_u_1 (tt_3_1_io_update_u_1_REG), // @[tage.scala:371:47] .io_update_u_2 (tt_3_1_io_update_u_2_REG), // @[tage.scala:371:47] .io_update_u_3 (tt_3_1_io_update_u_3_REG) // @[tage.scala:371:47] ); // @[tage.scala:235:21] TageTable_4 tt_4_1 ( // @[tage.scala:235:21] .clock (clock), .reset (reset), .io_f1_req_valid (t_io_f1_req_valid_REG_4), // @[tage.scala:236:35] .io_f1_req_pc (t_io_f1_req_pc_REG_4), // @[tage.scala:237:35] .io_f1_req_ghist (io_f1_ghist_0), // @[tage.scala:209:7] .io_f2_resp_0_valid (f2_resps_4_0_valid), .io_f2_resp_0_bits_ctr (f2_resps_4_0_bits_ctr), .io_f2_resp_0_bits_u (f2_resps_4_0_bits_u), .io_f2_resp_1_valid (f2_resps_4_1_valid), .io_f2_resp_1_bits_ctr (f2_resps_4_1_bits_ctr), .io_f2_resp_1_bits_u (f2_resps_4_1_bits_u), .io_f2_resp_2_valid (f2_resps_4_2_valid), .io_f2_resp_2_bits_ctr (f2_resps_4_2_bits_ctr), .io_f2_resp_2_bits_u (f2_resps_4_2_bits_u), .io_f2_resp_3_valid (f2_resps_4_3_valid), .io_f2_resp_3_bits_ctr (f2_resps_4_3_bits_ctr), .io_f2_resp_3_bits_u (f2_resps_4_3_bits_u), .io_update_mask_0 (tt_4_1_io_update_mask_0_REG), // @[tage.scala:365:48] .io_update_mask_1 (tt_4_1_io_update_mask_1_REG), // @[tage.scala:365:48] .io_update_mask_2 (tt_4_1_io_update_mask_2_REG), // @[tage.scala:365:48] .io_update_mask_3 (tt_4_1_io_update_mask_3_REG), // @[tage.scala:365:48] .io_update_taken_0 (tt_4_1_io_update_taken_0_REG), // @[tage.scala:366:48] .io_update_taken_1 (tt_4_1_io_update_taken_1_REG), // @[tage.scala:366:48] 
.io_update_taken_2 (tt_4_1_io_update_taken_2_REG), // @[tage.scala:366:48] .io_update_taken_3 (tt_4_1_io_update_taken_3_REG), // @[tage.scala:366:48] .io_update_alloc_0 (tt_4_1_io_update_alloc_0_REG), // @[tage.scala:367:48] .io_update_alloc_1 (tt_4_1_io_update_alloc_1_REG), // @[tage.scala:367:48] .io_update_alloc_2 (tt_4_1_io_update_alloc_2_REG), // @[tage.scala:367:48] .io_update_alloc_3 (tt_4_1_io_update_alloc_3_REG), // @[tage.scala:367:48] .io_update_old_ctr_0 (tt_4_1_io_update_old_ctr_0_REG), // @[tage.scala:368:48] .io_update_old_ctr_1 (tt_4_1_io_update_old_ctr_1_REG), // @[tage.scala:368:48] .io_update_old_ctr_2 (tt_4_1_io_update_old_ctr_2_REG), // @[tage.scala:368:48] .io_update_old_ctr_3 (tt_4_1_io_update_old_ctr_3_REG), // @[tage.scala:368:48] .io_update_pc (tt_4_1_io_update_pc_REG), // @[tage.scala:373:41] .io_update_hist (tt_4_1_io_update_hist_REG), // @[tage.scala:374:41] .io_update_u_mask_0 (tt_4_1_io_update_u_mask_0_REG), // @[tage.scala:370:47] .io_update_u_mask_1 (tt_4_1_io_update_u_mask_1_REG), // @[tage.scala:370:47] .io_update_u_mask_2 (tt_4_1_io_update_u_mask_2_REG), // @[tage.scala:370:47] .io_update_u_mask_3 (tt_4_1_io_update_u_mask_3_REG), // @[tage.scala:370:47] .io_update_u_0 (tt_4_1_io_update_u_0_REG), // @[tage.scala:371:47] .io_update_u_1 (tt_4_1_io_update_u_1_REG), // @[tage.scala:371:47] .io_update_u_2 (tt_4_1_io_update_u_2_REG), // @[tage.scala:371:47] .io_update_u_3 (tt_4_1_io_update_u_3_REG) // @[tage.scala:371:47] ); // @[tage.scala:235:21] TageTable_5 tt_5_1 ( // @[tage.scala:235:21] .clock (clock), .reset (reset), .io_f1_req_valid (t_io_f1_req_valid_REG_5), // @[tage.scala:236:35] .io_f1_req_pc (t_io_f1_req_pc_REG_5), // @[tage.scala:237:35] .io_f1_req_ghist (io_f1_ghist_0), // @[tage.scala:209:7] .io_f2_resp_0_valid (f2_resps_5_0_valid), .io_f2_resp_0_bits_ctr (f2_resps_5_0_bits_ctr), .io_f2_resp_0_bits_u (f2_resps_5_0_bits_u), .io_f2_resp_1_valid (f2_resps_5_1_valid), .io_f2_resp_1_bits_ctr (f2_resps_5_1_bits_ctr), .io_f2_resp_1_bits_u (f2_resps_5_1_bits_u), .io_f2_resp_2_valid (f2_resps_5_2_valid), .io_f2_resp_2_bits_ctr (f2_resps_5_2_bits_ctr), .io_f2_resp_2_bits_u (f2_resps_5_2_bits_u), .io_f2_resp_3_valid (f2_resps_5_3_valid), .io_f2_resp_3_bits_ctr (f2_resps_5_3_bits_ctr), .io_f2_resp_3_bits_u (f2_resps_5_3_bits_u), .io_update_mask_0 (tt_5_1_io_update_mask_0_REG), // @[tage.scala:365:48] .io_update_mask_1 (tt_5_1_io_update_mask_1_REG), // @[tage.scala:365:48] .io_update_mask_2 (tt_5_1_io_update_mask_2_REG), // @[tage.scala:365:48] .io_update_mask_3 (tt_5_1_io_update_mask_3_REG), // @[tage.scala:365:48] .io_update_taken_0 (tt_5_1_io_update_taken_0_REG), // @[tage.scala:366:48] .io_update_taken_1 (tt_5_1_io_update_taken_1_REG), // @[tage.scala:366:48] .io_update_taken_2 (tt_5_1_io_update_taken_2_REG), // @[tage.scala:366:48] .io_update_taken_3 (tt_5_1_io_update_taken_3_REG), // @[tage.scala:366:48] .io_update_alloc_0 (tt_5_1_io_update_alloc_0_REG), // @[tage.scala:367:48] .io_update_alloc_1 (tt_5_1_io_update_alloc_1_REG), // @[tage.scala:367:48] .io_update_alloc_2 (tt_5_1_io_update_alloc_2_REG), // @[tage.scala:367:48] .io_update_alloc_3 (tt_5_1_io_update_alloc_3_REG), // @[tage.scala:367:48] .io_update_old_ctr_0 (tt_5_1_io_update_old_ctr_0_REG), // @[tage.scala:368:48] .io_update_old_ctr_1 (tt_5_1_io_update_old_ctr_1_REG), // @[tage.scala:368:48] .io_update_old_ctr_2 (tt_5_1_io_update_old_ctr_2_REG), // @[tage.scala:368:48] .io_update_old_ctr_3 (tt_5_1_io_update_old_ctr_3_REG), // @[tage.scala:368:48] .io_update_pc (tt_5_1_io_update_pc_REG), // 
@[tage.scala:373:41] .io_update_hist (tt_5_1_io_update_hist_REG), // @[tage.scala:374:41] .io_update_u_mask_0 (tt_5_1_io_update_u_mask_0_REG), // @[tage.scala:370:47] .io_update_u_mask_1 (tt_5_1_io_update_u_mask_1_REG), // @[tage.scala:370:47] .io_update_u_mask_2 (tt_5_1_io_update_u_mask_2_REG), // @[tage.scala:370:47] .io_update_u_mask_3 (tt_5_1_io_update_u_mask_3_REG), // @[tage.scala:370:47] .io_update_u_0 (tt_5_1_io_update_u_0_REG), // @[tage.scala:371:47] .io_update_u_1 (tt_5_1_io_update_u_1_REG), // @[tage.scala:371:47] .io_update_u_2 (tt_5_1_io_update_u_2_REG), // @[tage.scala:371:47] .io_update_u_3 (tt_5_1_io_update_u_3_REG) // @[tage.scala:371:47] ); // @[tage.scala:235:21] MaxPeriodFibonacciLFSR_3 alloc_lfsr_prng ( // @[PRNG.scala:91:22] .clock (clock), .reset (reset), .io_out_0 (_alloc_lfsr_prng_io_out_0), .io_out_1 (_alloc_lfsr_prng_io_out_1), .io_out_2 (_alloc_lfsr_prng_io_out_2), .io_out_3 (_alloc_lfsr_prng_io_out_3), .io_out_4 (_alloc_lfsr_prng_io_out_4), .io_out_5 (_alloc_lfsr_prng_io_out_5) ); // @[PRNG.scala:91:22] MaxPeriodFibonacciLFSR_4 alloc_lfsr_prng_1 ( // @[PRNG.scala:91:22] .clock (clock), .reset (reset), .io_out_0 (_alloc_lfsr_prng_1_io_out_0), .io_out_1 (_alloc_lfsr_prng_1_io_out_1), .io_out_2 (_alloc_lfsr_prng_1_io_out_2), .io_out_3 (_alloc_lfsr_prng_1_io_out_3), .io_out_4 (_alloc_lfsr_prng_1_io_out_4), .io_out_5 (_alloc_lfsr_prng_1_io_out_5) ); // @[PRNG.scala:91:22] MaxPeriodFibonacciLFSR_5 alloc_lfsr_prng_2 ( // @[PRNG.scala:91:22] .clock (clock), .reset (reset), .io_out_0 (_alloc_lfsr_prng_2_io_out_0), .io_out_1 (_alloc_lfsr_prng_2_io_out_1), .io_out_2 (_alloc_lfsr_prng_2_io_out_2), .io_out_3 (_alloc_lfsr_prng_2_io_out_3), .io_out_4 (_alloc_lfsr_prng_2_io_out_4), .io_out_5 (_alloc_lfsr_prng_2_io_out_5) ); // @[PRNG.scala:91:22] MaxPeriodFibonacciLFSR_6 alloc_lfsr_prng_3 ( // @[PRNG.scala:91:22] .clock (clock), .reset (reset), .io_out_0 (_alloc_lfsr_prng_3_io_out_0), .io_out_1 (_alloc_lfsr_prng_3_io_out_1), .io_out_2 (_alloc_lfsr_prng_3_io_out_2), .io_out_3 (_alloc_lfsr_prng_3_io_out_3), .io_out_4 (_alloc_lfsr_prng_3_io_out_4), .io_out_5 (_alloc_lfsr_prng_3_io_out_5) ); // @[PRNG.scala:91:22] assign io_resp_f1_0_taken = io_resp_f1_0_taken_0; // @[tage.scala:209:7] assign io_resp_f1_0_is_br = io_resp_f1_0_is_br_0; // @[tage.scala:209:7] assign io_resp_f1_0_is_jal = io_resp_f1_0_is_jal_0; // @[tage.scala:209:7] assign io_resp_f1_0_predicted_pc_valid = io_resp_f1_0_predicted_pc_valid_0; // @[tage.scala:209:7] assign io_resp_f1_0_predicted_pc_bits = io_resp_f1_0_predicted_pc_bits_0; // @[tage.scala:209:7] assign io_resp_f1_1_taken = io_resp_f1_1_taken_0; // @[tage.scala:209:7] assign io_resp_f1_1_is_br = io_resp_f1_1_is_br_0; // @[tage.scala:209:7] assign io_resp_f1_1_is_jal = io_resp_f1_1_is_jal_0; // @[tage.scala:209:7] assign io_resp_f1_1_predicted_pc_valid = io_resp_f1_1_predicted_pc_valid_0; // @[tage.scala:209:7] assign io_resp_f1_1_predicted_pc_bits = io_resp_f1_1_predicted_pc_bits_0; // @[tage.scala:209:7] assign io_resp_f1_2_taken = io_resp_f1_2_taken_0; // @[tage.scala:209:7] assign io_resp_f1_2_is_br = io_resp_f1_2_is_br_0; // @[tage.scala:209:7] assign io_resp_f1_2_is_jal = io_resp_f1_2_is_jal_0; // @[tage.scala:209:7] assign io_resp_f1_2_predicted_pc_valid = io_resp_f1_2_predicted_pc_valid_0; // @[tage.scala:209:7] assign io_resp_f1_2_predicted_pc_bits = io_resp_f1_2_predicted_pc_bits_0; // @[tage.scala:209:7] assign io_resp_f1_3_taken = io_resp_f1_3_taken_0; // @[tage.scala:209:7] assign io_resp_f1_3_is_br = io_resp_f1_3_is_br_0; // 
@[tage.scala:209:7] assign io_resp_f1_3_is_jal = io_resp_f1_3_is_jal_0; // @[tage.scala:209:7] assign io_resp_f1_3_predicted_pc_valid = io_resp_f1_3_predicted_pc_valid_0; // @[tage.scala:209:7] assign io_resp_f1_3_predicted_pc_bits = io_resp_f1_3_predicted_pc_bits_0; // @[tage.scala:209:7] assign io_resp_f2_0_taken = io_resp_f2_0_taken_0; // @[tage.scala:209:7] assign io_resp_f2_0_is_br = io_resp_f2_0_is_br_0; // @[tage.scala:209:7] assign io_resp_f2_0_is_jal = io_resp_f2_0_is_jal_0; // @[tage.scala:209:7] assign io_resp_f2_0_predicted_pc_valid = io_resp_f2_0_predicted_pc_valid_0; // @[tage.scala:209:7] assign io_resp_f2_0_predicted_pc_bits = io_resp_f2_0_predicted_pc_bits_0; // @[tage.scala:209:7] assign io_resp_f2_1_taken = io_resp_f2_1_taken_0; // @[tage.scala:209:7] assign io_resp_f2_1_is_br = io_resp_f2_1_is_br_0; // @[tage.scala:209:7] assign io_resp_f2_1_is_jal = io_resp_f2_1_is_jal_0; // @[tage.scala:209:7] assign io_resp_f2_1_predicted_pc_valid = io_resp_f2_1_predicted_pc_valid_0; // @[tage.scala:209:7] assign io_resp_f2_1_predicted_pc_bits = io_resp_f2_1_predicted_pc_bits_0; // @[tage.scala:209:7] assign io_resp_f2_2_taken = io_resp_f2_2_taken_0; // @[tage.scala:209:7] assign io_resp_f2_2_is_br = io_resp_f2_2_is_br_0; // @[tage.scala:209:7] assign io_resp_f2_2_is_jal = io_resp_f2_2_is_jal_0; // @[tage.scala:209:7] assign io_resp_f2_2_predicted_pc_valid = io_resp_f2_2_predicted_pc_valid_0; // @[tage.scala:209:7] assign io_resp_f2_2_predicted_pc_bits = io_resp_f2_2_predicted_pc_bits_0; // @[tage.scala:209:7] assign io_resp_f2_3_taken = io_resp_f2_3_taken_0; // @[tage.scala:209:7] assign io_resp_f2_3_is_br = io_resp_f2_3_is_br_0; // @[tage.scala:209:7] assign io_resp_f2_3_is_jal = io_resp_f2_3_is_jal_0; // @[tage.scala:209:7] assign io_resp_f2_3_predicted_pc_valid = io_resp_f2_3_predicted_pc_valid_0; // @[tage.scala:209:7] assign io_resp_f2_3_predicted_pc_bits = io_resp_f2_3_predicted_pc_bits_0; // @[tage.scala:209:7] assign io_resp_f3_0_taken = io_resp_f3_0_taken_0; // @[tage.scala:209:7] assign io_resp_f3_0_is_br = io_resp_f3_0_is_br_0; // @[tage.scala:209:7] assign io_resp_f3_0_is_jal = io_resp_f3_0_is_jal_0; // @[tage.scala:209:7] assign io_resp_f3_0_predicted_pc_valid = io_resp_f3_0_predicted_pc_valid_0; // @[tage.scala:209:7] assign io_resp_f3_0_predicted_pc_bits = io_resp_f3_0_predicted_pc_bits_0; // @[tage.scala:209:7] assign io_resp_f3_1_taken = io_resp_f3_1_taken_0; // @[tage.scala:209:7] assign io_resp_f3_1_is_br = io_resp_f3_1_is_br_0; // @[tage.scala:209:7] assign io_resp_f3_1_is_jal = io_resp_f3_1_is_jal_0; // @[tage.scala:209:7] assign io_resp_f3_1_predicted_pc_valid = io_resp_f3_1_predicted_pc_valid_0; // @[tage.scala:209:7] assign io_resp_f3_1_predicted_pc_bits = io_resp_f3_1_predicted_pc_bits_0; // @[tage.scala:209:7] assign io_resp_f3_2_taken = io_resp_f3_2_taken_0; // @[tage.scala:209:7] assign io_resp_f3_2_is_br = io_resp_f3_2_is_br_0; // @[tage.scala:209:7] assign io_resp_f3_2_is_jal = io_resp_f3_2_is_jal_0; // @[tage.scala:209:7] assign io_resp_f3_2_predicted_pc_valid = io_resp_f3_2_predicted_pc_valid_0; // @[tage.scala:209:7] assign io_resp_f3_2_predicted_pc_bits = io_resp_f3_2_predicted_pc_bits_0; // @[tage.scala:209:7] assign io_resp_f3_3_taken = io_resp_f3_3_taken_0; // @[tage.scala:209:7] assign io_resp_f3_3_is_br = io_resp_f3_3_is_br_0; // @[tage.scala:209:7] assign io_resp_f3_3_is_jal = io_resp_f3_3_is_jal_0; // @[tage.scala:209:7] assign io_resp_f3_3_predicted_pc_valid = io_resp_f3_3_predicted_pc_valid_0; // @[tage.scala:209:7] assign 
io_resp_f3_3_predicted_pc_bits = io_resp_f3_3_predicted_pc_bits_0; // @[tage.scala:209:7] assign io_f3_meta = io_f3_meta_0; // @[tage.scala:209:7] endmodule
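The long chains of _T_* muxes above implement the provider search of the TAGE predictor: tables are scanned from shortest to longest history, the last table with a valid f2 response becomes the provider, and the previously winning table becomes the alternate provider whose ctr/u fields are captured in the alt_* registers. As a sketch only (nTables, f2_resps, s3_provider and the per-bank index w are assumed names, not the generator's exact source), a Chisel fold of this shape lowers into exactly such a priority chain:

  // Sketch: walk tables 0 .. nTables-1; a hit at table i displaces the current provider.
  var provided    = false.B
  var provider    = 0.U(log2Ceil(nTables).W)
  var altProvided = false.B
  var altProvider = 0.U(log2Ceil(nTables).W)
  for (i <- 0 until nTables) {
    val hit = f2_resps(i)(w).valid
    altProvided = Mux(hit, provided, altProvided) // old provider demotes to alternate
    altProvider = Mux(hit, provider, altProvider)
    provided    = provided || hit
    provider    = Mux(hit, i.U, provider)
  }
  s3_provided(w)     := provided    // registered at the f2->f3 boundary, as in the flops above
  s3_provider(w)     := provider
  s3_alt_provided(w) := altProvided
  s3_alt_provider(w) := altProvider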
Generate the Verilog code corresponding to the following Chisel files. File UnsafeAXI4ToTL.scala: package ara import chisel3._ import chisel3.util._ import freechips.rocketchip.amba._ import freechips.rocketchip.amba.axi4._ import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.util._ class ReorderData(val dataWidth: Int, val respWidth: Int, val userFields: Seq[BundleFieldBase]) extends Bundle { val data = UInt(dataWidth.W) val resp = UInt(respWidth.W) val last = Bool() val user = BundleMap(userFields) } /** Parameters for [[BaseReservableListBuffer]] and all child classes. * * @param numEntries Total number of elements that can be stored in the 'data' RAM * @param numLists Maximum number of linked lists * @param numBeats Maximum number of beats per entry */ case class ReservableListBufferParameters(numEntries: Int, numLists: Int, numBeats: Int) { // Avoid zero-width wires when we call 'log2Ceil' val entryBits = if (numEntries == 1) 1 else log2Ceil(numEntries) val listBits = if (numLists == 1) 1 else log2Ceil(numLists) val beatBits = if (numBeats == 1) 1 else log2Ceil(numBeats) } case class UnsafeAXI4ToTLNode(numTlTxns: Int, wcorrupt: Boolean)(implicit valName: ValName) extends MixedAdapterNode(AXI4Imp, TLImp)( dFn = { case mp => TLMasterPortParameters.v2( masters = mp.masters.zipWithIndex.map { case (m, i) => // Support 'numTlTxns' read requests and 'numTlTxns' write requests at once. val numSourceIds = numTlTxns * 2 TLMasterParameters.v2( name = m.name, sourceId = IdRange(i * numSourceIds, (i + 1) * numSourceIds), nodePath = m.nodePath ) }, echoFields = mp.echoFields, requestFields = AMBAProtField() +: mp.requestFields, responseKeys = mp.responseKeys ) }, uFn = { mp => AXI4SlavePortParameters( slaves = mp.managers.map { m => val maxXfer = TransferSizes(1, mp.beatBytes * (1 << AXI4Parameters.lenBits)) AXI4SlaveParameters( address = m.address, resources = m.resources, regionType = m.regionType, executable = m.executable, nodePath = m.nodePath, supportsWrite = m.supportsPutPartial.intersect(maxXfer), supportsRead = m.supportsGet.intersect(maxXfer), interleavedId = Some(0) // TL2 never interleaves D beats ) }, beatBytes = mp.beatBytes, minLatency = mp.minLatency, responseFields = mp.responseFields, requestKeys = (if (wcorrupt) Seq(AMBACorrupt) else Seq()) ++ mp.requestKeys.filter(_ != AMBAProt) ) } ) class UnsafeAXI4ToTL(numTlTxns: Int, wcorrupt: Boolean)(implicit p: Parameters) extends LazyModule { require(numTlTxns >= 1) require(isPow2(numTlTxns), s"Number of TileLink transactions ($numTlTxns) must be a power of 2") val node = UnsafeAXI4ToTLNode(numTlTxns, wcorrupt) lazy val module = new LazyModuleImp(this) { (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => edgeIn.master.masters.foreach { m => require(m.aligned, "AXI4ToTL requires aligned requests") } val numIds = edgeIn.master.endId val beatBytes = edgeOut.slave.beatBytes val maxTransfer = edgeOut.slave.maxTransfer val maxBeats = maxTransfer / beatBytes // Look for an Error device to redirect bad requests val errorDevs = edgeOut.slave.managers.filter(_.nodePath.last.lazyModule.className == "TLError") require(!errorDevs.isEmpty, "There is no TLError reachable from AXI4ToTL. 
One must be instantiated.") val errorDev = errorDevs.maxBy(_.maxTransfer) val errorDevAddr = errorDev.address.head.base require( errorDev.supportsPutPartial.contains(maxTransfer), s"Error device supports ${errorDev.supportsPutPartial} PutPartial but must support $maxTransfer" ) require( errorDev.supportsGet.contains(maxTransfer), s"Error device supports ${errorDev.supportsGet} Get but must support $maxTransfer" ) // All of the read-response reordering logic. val listBufData = new ReorderData(beatBytes * 8, edgeIn.bundle.respBits, out.d.bits.user.fields) val listBufParams = ReservableListBufferParameters(numTlTxns, numIds, maxBeats) val listBuffer = if (numTlTxns > 1) { Module(new ReservableListBuffer(listBufData, listBufParams)) } else { Module(new PassthroughListBuffer(listBufData, listBufParams)) } // To differentiate between read and write transaction IDs, we will set the MSB of the TileLink 'source' field to // 0 for read requests and 1 for write requests. val isReadSourceBit = 0.U(1.W) val isWriteSourceBit = 1.U(1.W) /* Read request logic */ val rOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle))) val rBytes1 = in.ar.bits.bytes1() val rSize = OH1ToUInt(rBytes1) val rOk = edgeOut.slave.supportsGetSafe(in.ar.bits.addr, rSize) val rId = if (numTlTxns > 1) { Cat(isReadSourceBit, listBuffer.ioReservedIndex) } else { isReadSourceBit } val rAddr = Mux(rOk, in.ar.bits.addr, errorDevAddr.U | in.ar.bits.addr(log2Ceil(beatBytes) - 1, 0)) // Indicates if there are still valid TileLink source IDs left to use. val canIssueR = listBuffer.ioReserve.ready listBuffer.ioReserve.bits := in.ar.bits.id listBuffer.ioReserve.valid := in.ar.valid && rOut.ready in.ar.ready := rOut.ready && canIssueR rOut.valid := in.ar.valid && canIssueR rOut.bits :<= edgeOut.Get(rId, rAddr, rSize)._2 rOut.bits.user :<= in.ar.bits.user rOut.bits.user.lift(AMBAProt).foreach { rProt => rProt.privileged := in.ar.bits.prot(0) rProt.secure := !in.ar.bits.prot(1) rProt.fetch := in.ar.bits.prot(2) rProt.bufferable := in.ar.bits.cache(0) rProt.modifiable := in.ar.bits.cache(1) rProt.readalloc := in.ar.bits.cache(2) rProt.writealloc := in.ar.bits.cache(3) } /* Write request logic */ // Strip off the MSB, which identifies the transaction as read vs write. val strippedResponseSourceId = if (numTlTxns > 1) { out.d.bits.source((out.d.bits.source).getWidth - 2, 0) } else { // When there's only 1 TileLink transaction allowed for read/write, then this field is always 0. 0.U(1.W) } // Track when a write request burst is in progress. val writeBurstBusy = RegInit(false.B) when(in.w.fire) { writeBurstBusy := !in.w.bits.last } val usedWriteIds = RegInit(0.U(numTlTxns.W)) val canIssueW = !usedWriteIds.andR val usedWriteIdsSet = WireDefault(0.U(numTlTxns.W)) val usedWriteIdsClr = WireDefault(0.U(numTlTxns.W)) usedWriteIds := (usedWriteIds & ~usedWriteIdsClr) | usedWriteIdsSet // Since write responses can show up in the middle of a write burst, we need to ensure the write burst ID doesn't // change mid-burst. 
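// Illustrative note (not part of the original source): the freeWriteIdOHRaw expression below is the
// usual lowest-free-entry one-hot trick. leftOR(~usedWriteIds) sets every bit at and above the lowest
// cleared bit of usedWriteIds; shifting left by one and complementing keeps only the bits at or below
// that position, and masking with ~usedWriteIds isolates exactly the lowest free write ID.
// For example, with numTlTxns = 4 and usedWriteIds = "b0101":
//   ~usedWriteIds = b1010, leftOR(~usedWriteIds) = b1110, ~(b1110 << 1) = b0011 (low bits),
//   b0011 & b1010 = b0010, so write ID 1 is allocated next.
// 'holdUnless !writeBurstBusy' then freezes that choice for the duration of a W burst.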
val freeWriteIdOHRaw = Wire(UInt(numTlTxns.W)) val freeWriteIdOH = freeWriteIdOHRaw holdUnless !writeBurstBusy val freeWriteIdIndex = OHToUInt(freeWriteIdOH) freeWriteIdOHRaw := ~(leftOR(~usedWriteIds) << 1) & ~usedWriteIds val wOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle))) val wBytes1 = in.aw.bits.bytes1() val wSize = OH1ToUInt(wBytes1) val wOk = edgeOut.slave.supportsPutPartialSafe(in.aw.bits.addr, wSize) val wId = if (numTlTxns > 1) { Cat(isWriteSourceBit, freeWriteIdIndex) } else { isWriteSourceBit } val wAddr = Mux(wOk, in.aw.bits.addr, errorDevAddr.U | in.aw.bits.addr(log2Ceil(beatBytes) - 1, 0)) // Here, we're taking advantage of the Irrevocable behavior of AXI4 (once 'valid' is asserted it must remain // asserted until the handshake occurs). We will only accept W-channel beats when we have a valid AW beat, but // the AW-channel beat won't fire until the final W-channel beat fires. So, we have stable address/size/strb // bits during a W-channel burst. in.aw.ready := wOut.ready && in.w.valid && in.w.bits.last && canIssueW in.w.ready := wOut.ready && in.aw.valid && canIssueW wOut.valid := in.aw.valid && in.w.valid && canIssueW wOut.bits :<= edgeOut.Put(wId, wAddr, wSize, in.w.bits.data, in.w.bits.strb)._2 in.w.bits.user.lift(AMBACorrupt).foreach { wOut.bits.corrupt := _ } wOut.bits.user :<= in.aw.bits.user wOut.bits.user.lift(AMBAProt).foreach { wProt => wProt.privileged := in.aw.bits.prot(0) wProt.secure := !in.aw.bits.prot(1) wProt.fetch := in.aw.bits.prot(2) wProt.bufferable := in.aw.bits.cache(0) wProt.modifiable := in.aw.bits.cache(1) wProt.readalloc := in.aw.bits.cache(2) wProt.writealloc := in.aw.bits.cache(3) } // Merge the AXI4 read/write requests into the TL-A channel. TLArbiter(TLArbiter.roundRobin)(out.a, (0.U, rOut), (in.aw.bits.len, wOut)) /* Read/write response logic */ val okB = Wire(Irrevocable(new AXI4BundleB(edgeIn.bundle))) val okR = Wire(Irrevocable(new AXI4BundleR(edgeIn.bundle))) val dResp = Mux(out.d.bits.denied || out.d.bits.corrupt, AXI4Parameters.RESP_SLVERR, AXI4Parameters.RESP_OKAY) val dHasData = edgeOut.hasData(out.d.bits) val (_dFirst, dLast, _dDone, dCount) = edgeOut.count(out.d) val dNumBeats1 = edgeOut.numBeats1(out.d.bits) // Handle cases where writeack arrives before write is done val writeEarlyAck = (UIntToOH(strippedResponseSourceId) & usedWriteIds) === 0.U out.d.ready := Mux(dHasData, listBuffer.ioResponse.ready, okB.ready && !writeEarlyAck) listBuffer.ioDataOut.ready := okR.ready okR.valid := listBuffer.ioDataOut.valid okB.valid := out.d.valid && !dHasData && !writeEarlyAck listBuffer.ioResponse.valid := out.d.valid && dHasData listBuffer.ioResponse.bits.index := strippedResponseSourceId listBuffer.ioResponse.bits.data.data := out.d.bits.data listBuffer.ioResponse.bits.data.resp := dResp listBuffer.ioResponse.bits.data.last := dLast listBuffer.ioResponse.bits.data.user :<= out.d.bits.user listBuffer.ioResponse.bits.count := dCount listBuffer.ioResponse.bits.numBeats1 := dNumBeats1 okR.bits.id := listBuffer.ioDataOut.bits.listIndex okR.bits.data := listBuffer.ioDataOut.bits.payload.data okR.bits.resp := listBuffer.ioDataOut.bits.payload.resp okR.bits.last := listBuffer.ioDataOut.bits.payload.last okR.bits.user :<= listBuffer.ioDataOut.bits.payload.user // Upon the final beat in a write request, record a mapping from TileLink source ID to AXI write ID. Upon a write // response, mark the write transaction as complete. 
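// Example walk-through (illustrative only): suppose an AW beat with AWID = 7 is accepted while
// freeWriteIdIndex = 2. The TileLink Put is issued with source = Cat(isWriteSourceBit, 2), and
// writeIdMap entry 2 records AWID 7 as the W beats are sent. When the matching AccessAck returns on
// the D channel, strippedResponseSourceId recovers 2, writeIdMap.read(2) yields 7 for okB.bits.id,
// and usedWriteIdsClr releases write ID 2 once the B beat fires.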
val writeIdMap = Mem(numTlTxns, UInt(log2Ceil(numIds).W)) val writeResponseId = writeIdMap.read(strippedResponseSourceId) when(wOut.fire) { writeIdMap.write(freeWriteIdIndex, in.aw.bits.id) } when(edgeOut.done(wOut)) { usedWriteIdsSet := freeWriteIdOH } when(okB.fire) { usedWriteIdsClr := UIntToOH(strippedResponseSourceId, numTlTxns) } okB.bits.id := writeResponseId okB.bits.resp := dResp okB.bits.user :<= out.d.bits.user // AXI4 needs irrevocable behaviour in.r <> Queue.irrevocable(okR, 1, flow = true) in.b <> Queue.irrevocable(okB, 1, flow = true) // Unused channels out.b.ready := true.B out.c.valid := false.B out.e.valid := false.B /* Alignment constraints. The AXI4Fragmenter should guarantee all of these constraints. */ def checkRequest[T <: AXI4BundleA](a: IrrevocableIO[T], reqType: String): Unit = { val lReqType = reqType.toLowerCase when(a.valid) { assert(a.bits.len < maxBeats.U, s"$reqType burst length (%d) must be less than $maxBeats", a.bits.len + 1.U) // Narrow transfers and FIXED bursts must be single-beat bursts. when(a.bits.len =/= 0.U) { assert( a.bits.size === log2Ceil(beatBytes).U, s"Narrow $lReqType transfers (%d < $beatBytes bytes) can't be multi-beat bursts (%d beats)", 1.U << a.bits.size, a.bits.len + 1.U ) assert( a.bits.burst =/= AXI4Parameters.BURST_FIXED, s"Fixed $lReqType bursts can't be multi-beat bursts (%d beats)", a.bits.len + 1.U ) } // Furthermore, the transfer size (a.bits.bytes1() + 1.U) must be naturally-aligned to the address (in // particular, during both WRAP and INCR bursts), but this constraint is already checked by TileLink // Monitors. Note that this alignment requirement means that WRAP bursts are identical to INCR bursts. } } checkRequest(in.ar, "Read") checkRequest(in.aw, "Write") } } } object UnsafeAXI4ToTL { def apply(numTlTxns: Int = 1, wcorrupt: Boolean = true)(implicit p: Parameters) = { val axi42tl = LazyModule(new UnsafeAXI4ToTL(numTlTxns, wcorrupt)) axi42tl.node } } /* ReservableListBuffer logic, and associated classes. */ class ResponsePayload[T <: Data](val data: T, val params: ReservableListBufferParameters) extends Bundle { val index = UInt(params.entryBits.W) val count = UInt(params.beatBits.W) val numBeats1 = UInt(params.beatBits.W) } class DataOutPayload[T <: Data](val payload: T, val params: ReservableListBufferParameters) extends Bundle { val listIndex = UInt(params.listBits.W) } /** Abstract base class to unify [[ReservableListBuffer]] and [[PassthroughListBuffer]]. */ abstract class BaseReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends Module { require(params.numEntries > 0) require(params.numLists > 0) val ioReserve = IO(Flipped(Decoupled(UInt(params.listBits.W)))) val ioReservedIndex = IO(Output(UInt(params.entryBits.W))) val ioResponse = IO(Flipped(Decoupled(new ResponsePayload(gen, params)))) val ioDataOut = IO(Decoupled(new DataOutPayload(gen, params))) } /** A modified version of 'ListBuffer' from 'sifive/block-inclusivecache-sifive'. This module forces users to reserve * linked list entries (through the 'ioReserve' port) before writing data into those linked lists (through the * 'ioResponse' port). Each response is tagged to indicate which linked list it is written into. The responses for a * given linked list can come back out-of-order, but they will be read out through the 'ioDataOut' port in-order. 
* * ==Constructor== * @param gen Chisel type of linked list data element * @param params Other parameters * * ==Module IO== * @param ioReserve Index of list to reserve a new element in * @param ioReservedIndex Index of the entry that was reserved in the linked list, valid when 'ioReserve.fire' * @param ioResponse Payload containing response data and linked-list-entry index * @param ioDataOut Payload containing data read from response linked list and linked list index */ class ReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends BaseReservableListBuffer(gen, params) { val valid = RegInit(0.U(params.numLists.W)) val head = Mem(params.numLists, UInt(params.entryBits.W)) val tail = Mem(params.numLists, UInt(params.entryBits.W)) val used = RegInit(0.U(params.numEntries.W)) val next = Mem(params.numEntries, UInt(params.entryBits.W)) val map = Mem(params.numEntries, UInt(params.listBits.W)) val dataMems = Seq.fill(params.numBeats) { SyncReadMem(params.numEntries, gen) } val dataIsPresent = RegInit(0.U(params.numEntries.W)) val beats = Mem(params.numEntries, UInt(params.beatBits.W)) // The 'data' SRAM should be single-ported (read-or-write), since dual-ported SRAMs are significantly slower. val dataMemReadEnable = WireDefault(false.B) val dataMemWriteEnable = WireDefault(false.B) assert(!(dataMemReadEnable && dataMemWriteEnable)) // 'freeOH' has a single bit set, which is the least-significant bit that is cleared in 'used'. So, it's the // lowest-index entry in the 'data' RAM which is free. val freeOH = Wire(UInt(params.numEntries.W)) val freeIndex = OHToUInt(freeOH) freeOH := ~(leftOR(~used) << 1) & ~used ioReservedIndex := freeIndex val validSet = WireDefault(0.U(params.numLists.W)) val validClr = WireDefault(0.U(params.numLists.W)) val usedSet = WireDefault(0.U(params.numEntries.W)) val usedClr = WireDefault(0.U(params.numEntries.W)) val dataIsPresentSet = WireDefault(0.U(params.numEntries.W)) val dataIsPresentClr = WireDefault(0.U(params.numEntries.W)) valid := (valid & ~validClr) | validSet used := (used & ~usedClr) | usedSet dataIsPresent := (dataIsPresent & ~dataIsPresentClr) | dataIsPresentSet /* Reservation logic signals */ val reserveTail = Wire(UInt(params.entryBits.W)) val reserveIsValid = Wire(Bool()) /* Response logic signals */ val responseIndex = Wire(UInt(params.entryBits.W)) val responseListIndex = Wire(UInt(params.listBits.W)) val responseHead = Wire(UInt(params.entryBits.W)) val responseTail = Wire(UInt(params.entryBits.W)) val nextResponseHead = Wire(UInt(params.entryBits.W)) val nextDataIsPresent = Wire(Bool()) val isResponseInOrder = Wire(Bool()) val isEndOfList = Wire(Bool()) val isLastBeat = Wire(Bool()) val isLastResponseBeat = Wire(Bool()) val isLastUnwindBeat = Wire(Bool()) /* Reservation logic */ reserveTail := tail.read(ioReserve.bits) reserveIsValid := valid(ioReserve.bits) ioReserve.ready := !used.andR // When we want to append-to and destroy the same linked list on the same cycle, we need to take special care that we // actually start a new list, rather than appending to a list that's about to disappear. 
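// Concrete instance of the hazard described above (illustrative): list 3 holds a single entry whose
// last beat leaves through ioDataOut this cycle (isEndOfList && isLastBeat), while ioReserve asks to
// reserve a new entry for the same list 3. Without the appendToAndDestroyList special case we would
// write 'next' behind a head that is being retired this cycle; with it, the fresh entry instead
// starts a brand-new list by writing 'head' directly.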
val reserveResponseSameList = ioReserve.bits === responseListIndex val appendToAndDestroyList = ioReserve.fire && ioDataOut.fire && reserveResponseSameList && isEndOfList && isLastBeat when(ioReserve.fire) { validSet := UIntToOH(ioReserve.bits, params.numLists) usedSet := freeOH when(reserveIsValid && !appendToAndDestroyList) { next.write(reserveTail, freeIndex) }.otherwise { head.write(ioReserve.bits, freeIndex) } tail.write(ioReserve.bits, freeIndex) map.write(freeIndex, ioReserve.bits) } /* Response logic */ // The majority of the response logic (reading from and writing to the various RAMs) is common between the // response-from-IO case (ioResponse.fire) and the response-from-unwind case (unwindDataIsValid). // The read from the 'next' RAM should be performed at the address given by 'responseHead'. However, we only use the // 'nextResponseHead' signal when 'isResponseInOrder' is asserted (both in the response-from-IO and // response-from-unwind cases), which implies that 'responseHead' equals 'responseIndex'. 'responseHead' comes after // two back-to-back RAM reads, so indexing into the 'next' RAM with 'responseIndex' is much quicker. responseHead := head.read(responseListIndex) responseTail := tail.read(responseListIndex) nextResponseHead := next.read(responseIndex) nextDataIsPresent := dataIsPresent(nextResponseHead) // Note that when 'isEndOfList' is asserted, 'nextResponseHead' (and therefore 'nextDataIsPresent') is invalid, since // there isn't a next element in the linked list. isResponseInOrder := responseHead === responseIndex isEndOfList := responseHead === responseTail isLastResponseBeat := ioResponse.bits.count === ioResponse.bits.numBeats1 // When a response's last beat is sent to the output channel, mark it as completed. This can happen in two // situations: // 1. We receive an in-order response, which travels straight from 'ioResponse' to 'ioDataOut'. The 'data' SRAM // reservation was never needed. // 2. An entry is read out of the 'data' SRAM (within the unwind FSM). when(ioDataOut.fire && isLastBeat) { // Mark the reservation as no-longer-used. usedClr := UIntToOH(responseIndex, params.numEntries) // If the response is in-order, then we're popping an element from this linked list. when(isEndOfList) { // Once we pop the last element from a linked list, mark it as no-longer-present. validClr := UIntToOH(responseListIndex, params.numLists) }.otherwise { // Move the linked list's head pointer to the new head pointer. head.write(responseListIndex, nextResponseHead) } } // If we get an out-of-order response, then stash it in the 'data' SRAM for later unwinding. when(ioResponse.fire && !isResponseInOrder) { dataMemWriteEnable := true.B when(isLastResponseBeat) { dataIsPresentSet := UIntToOH(ioResponse.bits.index, params.numEntries) beats.write(ioResponse.bits.index, ioResponse.bits.numBeats1) } } // Use the 'ioResponse.bits.count' index (AKA the beat number) to select which 'data' SRAM to write to. 
val responseCountOH = UIntToOH(ioResponse.bits.count, params.numBeats) (responseCountOH.asBools zip dataMems) foreach { case (select, seqMem) => when(select && dataMemWriteEnable) { seqMem.write(ioResponse.bits.index, ioResponse.bits.data) } } /* Response unwind logic */ // Unwind FSM state definitions val sIdle :: sUnwinding :: Nil = Enum(2) val unwindState = RegInit(sIdle) val busyUnwinding = unwindState === sUnwinding val startUnwind = Wire(Bool()) val stopUnwind = Wire(Bool()) when(startUnwind) { unwindState := sUnwinding }.elsewhen(stopUnwind) { unwindState := sIdle } assert(!(startUnwind && stopUnwind)) // Start the unwind FSM when there is an old out-of-order response stored in the 'data' SRAM that is now about to // become the next in-order response. As noted previously, when 'isEndOfList' is asserted, 'nextDataIsPresent' is // invalid. // // Note that since an in-order response from 'ioResponse' to 'ioDataOut' starts the unwind FSM, we don't have to // worry about overwriting the 'data' SRAM's output when we start the unwind FSM. startUnwind := ioResponse.fire && isResponseInOrder && isLastResponseBeat && !isEndOfList && nextDataIsPresent // Stop the unwind FSM when the output channel consumes the final beat of an element from the unwind FSM, and one of // two things happens: // 1. We're still waiting for the next in-order response for this list (!nextDataIsPresent) // 2. There are no more outstanding responses in this list (isEndOfList) // // Including 'busyUnwinding' ensures this is a single-cycle pulse, and it never fires while in-order transactions are // passing from 'ioResponse' to 'ioDataOut'. stopUnwind := busyUnwinding && ioDataOut.fire && isLastUnwindBeat && (!nextDataIsPresent || isEndOfList) val isUnwindBurstOver = Wire(Bool()) val startNewBurst = startUnwind || (isUnwindBurstOver && dataMemReadEnable) // Track the number of beats left to unwind for each list entry. At the start of a new burst, we flop the number of // beats in this burst (minus 1) into 'unwindBeats1', and we reset the 'beatCounter' counter. With each beat, we // increment 'beatCounter' until it reaches 'unwindBeats1'. val unwindBeats1 = Reg(UInt(params.beatBits.W)) val nextBeatCounter = Wire(UInt(params.beatBits.W)) val beatCounter = RegNext(nextBeatCounter) isUnwindBurstOver := beatCounter === unwindBeats1 when(startNewBurst) { unwindBeats1 := beats.read(nextResponseHead) nextBeatCounter := 0.U }.elsewhen(dataMemReadEnable) { nextBeatCounter := beatCounter + 1.U }.otherwise { nextBeatCounter := beatCounter } // When unwinding, feed the next linked-list head pointer (read out of the 'next' RAM) back so we can unwind the next // entry in this linked list. Only update the pointer when we're actually moving to the next 'data' SRAM entry (which // happens at the start of reading a new stored burst). val unwindResponseIndex = RegEnable(nextResponseHead, startNewBurst) responseIndex := Mux(busyUnwinding, unwindResponseIndex, ioResponse.bits.index) // Hold 'nextResponseHead' static while we're in the middle of unwinding a multi-beat burst entry. We don't want the // SRAM read address to shift while reading beats from a burst. Note that this is identical to 'nextResponseHead // holdUnless startNewBurst', but 'unwindResponseIndex' already implements the 'RegEnable' signal in 'holdUnless'. val unwindReadAddress = Mux(startNewBurst, nextResponseHead, unwindResponseIndex) // The 'data' SRAM's output is valid if we read from the SRAM on the previous cycle. 
The SRAM's output stays valid // until it is consumed by the output channel (and if we don't read from the SRAM again on that same cycle). val unwindDataIsValid = RegInit(false.B) when(dataMemReadEnable) { unwindDataIsValid := true.B }.elsewhen(ioDataOut.fire) { unwindDataIsValid := false.B } isLastUnwindBeat := isUnwindBurstOver && unwindDataIsValid // Indicates if this is the last beat for both 'ioResponse'-to-'ioDataOut' and unwind-to-'ioDataOut' beats. isLastBeat := Mux(busyUnwinding, isLastUnwindBeat, isLastResponseBeat) // Select which SRAM to read from based on the beat counter. val dataOutputVec = Wire(Vec(params.numBeats, gen)) val nextBeatCounterOH = UIntToOH(nextBeatCounter, params.numBeats) (nextBeatCounterOH.asBools zip dataMems).zipWithIndex foreach { case ((select, seqMem), i) => dataOutputVec(i) := seqMem.read(unwindReadAddress, select && dataMemReadEnable) } // Select the current 'data' SRAM output beat, and save the output in a register in case we're being back-pressured // by 'ioDataOut'. This implements the functionality of 'readAndHold', but only on the single SRAM we're reading // from. val dataOutput = dataOutputVec(beatCounter) holdUnless RegNext(dataMemReadEnable) // Mark 'data' burst entries as no-longer-present as they get read out of the SRAM. when(dataMemReadEnable) { dataIsPresentClr := UIntToOH(unwindReadAddress, params.numEntries) } // As noted above, when starting the unwind FSM, we know the 'data' SRAM's output isn't valid, so it's safe to issue // a read command. Otherwise, only issue an SRAM read when the next 'unwindState' is 'sUnwinding', and if we know // we're not going to overwrite the SRAM's current output (the SRAM output is already valid, and it's not going to be // consumed by the output channel). val dontReadFromDataMem = unwindDataIsValid && !ioDataOut.ready dataMemReadEnable := startUnwind || (busyUnwinding && !stopUnwind && !dontReadFromDataMem) // While unwinding, prevent new reservations from overwriting the current 'map' entry that we're using. We need // 'responseListIndex' to be coherent for the entire unwind process. val rawResponseListIndex = map.read(responseIndex) val unwindResponseListIndex = RegEnable(rawResponseListIndex, startNewBurst) responseListIndex := Mux(busyUnwinding, unwindResponseListIndex, rawResponseListIndex) // Accept responses either when they can be passed through to the output channel, or if they're out-of-order and are // just going to be stashed in the 'data' SRAM. Never accept a response payload when we're busy unwinding, since that // could result in reading from and writing to the 'data' SRAM in the same cycle, and we want that SRAM to be // single-ported. ioResponse.ready := (ioDataOut.ready || !isResponseInOrder) && !busyUnwinding // Either pass an in-order response to the output channel, or data read from the unwind FSM. ioDataOut.valid := Mux(busyUnwinding, unwindDataIsValid, ioResponse.valid && isResponseInOrder) ioDataOut.bits.listIndex := responseListIndex ioDataOut.bits.payload := Mux(busyUnwinding, dataOutput, ioResponse.bits.data) // It's an error to get a response that isn't associated with a valid linked list. when(ioResponse.fire || unwindDataIsValid) { assert( valid(responseListIndex), "No linked list exists at index %d, mapped from %d", responseListIndex, responseIndex ) } when(busyUnwinding && dataMemReadEnable) { assert(isResponseInOrder, "Unwind FSM must read entries from SRAM in order") } } /** Specialized version of [[ReservableListBuffer]] for the case of numEntries == 1. 
* * Much of the complex logic in [[ReservableListBuffer]] can disappear in this case. For instance, we don't have to * reorder any responses, or store any linked lists. */ class PassthroughListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends BaseReservableListBuffer(gen, params) { require(params.numEntries == 1, s"PassthroughListBuffer is only valid when 'numEntries' (${params.numEntries}) is 1") val used = RegInit(0.U(params.numEntries.W)) val map = Mem(params.numEntries, UInt(params.listBits.W)) val usedSet = WireDefault(0.U(params.numEntries.W)) val usedClr = WireDefault(0.U(params.numEntries.W)) used := (used & ~usedClr) | usedSet ioReserve.ready := used === 0.U // Store which list index was reserved, we need to return this value when we get a response. when(ioReserve.fire) { usedSet := 1.U map.write(0.U, ioReserve.bits) } // There's only one valid linked list entry, which is at index 0. ioReservedIndex := 0.U val isLastResponseBeat = ioResponse.bits.count === ioResponse.bits.numBeats1 // Mark the linked list as empty when we get the last beat in a response. // Note that 'ioResponse.fire === ioDataOut.fire'. when(ioResponse.fire && isLastResponseBeat) { usedClr := 1.U } // Always pass the response data straight through, since we never need to reorder the response data. ioDataOut.bits.listIndex := map.read(0.U) ioDataOut.bits.payload := ioResponse.bits.data ioDataOut.valid := ioResponse.valid ioResponse.ready := ioDataOut.ready }
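The least obvious piece of the file above is the one-hot free-entry selection in ReservableListBuffer. The sketch below is not part of the original sources; it assumes rocket-chip's leftOR helper (which smears set bits toward the MSB) and isolates that expression with a hand-traced example.

import chisel3._
import chisel3.util._
import freechips.rocketchip.util._

class LowestClearedBit(n: Int) extends Module {
  val io = IO(new Bundle {
    val used   = Input(UInt(n.W))    // bitmap of occupied entries
    val freeOH = Output(UInt(n.W))   // one-hot: lowest-index free entry
  })
  // Trace for n = 4, used = b0011 (entries 0 and 1 held):
  //   ~used           = b1100   free entries
  //   leftOR(~used)   = b1100   set bits smeared toward the MSB
  //   ~(... << 1)     = b0111   positions at or below the lowest free bit
  //   ... & ~used     = b0100   one-hot for entry 2, the lowest free index
  io.freeOH := ~(leftOR(~io.used) << 1) & ~io.used  // same expression as 'freeOH' above
}

Applying OHToUInt to this one-hot value gives the index that ReservableListBuffer reports on ioReservedIndex.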
module dataMems_511( // @[UnsafeAXI4ToTL.scala:365:62] input [4:0] R0_addr, input R0_en, input R0_clk, output [66:0] R0_data, input [4:0] W0_addr, input W0_en, input W0_clk, input [66:0] W0_data ); dataMems_0_ext dataMems_0_ext ( // @[UnsafeAXI4ToTL.scala:365:62] .R0_addr (R0_addr), .R0_en (R0_en), .R0_clk (R0_clk), .R0_data (R0_data), .W0_addr (W0_addr), .W0_en (W0_en), .W0_clk (W0_clk), .W0_data (W0_data) ); // @[UnsafeAXI4ToTL.scala:365:62] endmodule
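For reference, dataMems_* wrappers like the one above come from the SyncReadMem instances in ReservableListBuffer (the UnsafeAXI4ToTL.scala:365 annotation). Below is a minimal sketch of the Chisel idiom that the memory-extraction flow turns into this one-read, one-write macro (depth 32 and 67-bit entries here; the module and port names are illustrative, not from the original sources).

import chisel3._
import chisel3.util.log2Ceil

class OneReadOneWriteMem(depth: Int = 32, w: Int = 67) extends Module {
  val io = IO(new Bundle {
    val raddr = Input(UInt(log2Ceil(depth).W))
    val ren   = Input(Bool())
    val rdata = Output(UInt(w.W))
    val waddr = Input(UInt(log2Ceil(depth).W))
    val wen   = Input(Bool())
    val wdata = Input(UInt(w.W))
  })
  val mem = SyncReadMem(depth, UInt(w.W))
  io.rdata := mem.read(io.raddr, io.ren)           // becomes a read port like R0_* above
  when(io.wen) { mem.write(io.waddr, io.wdata) }   // becomes a write port like W0_* above
}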
Generate the Verilog code corresponding to the following Chisel files. File INToRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ package hardfloat import chisel3._ import consts._ class INToRecFN(intWidth: Int, expWidth: Int, sigWidth: Int) extends RawModule { override def desiredName = s"INToRecFN_i${intWidth}_e${expWidth}_s${sigWidth}" val io = IO(new Bundle { val signedIn = Input(Bool()) val in = Input(Bits(intWidth.W)) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((expWidth + sigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val intAsRawFloat = rawFloatFromIN(io.signedIn, io.in); val roundAnyRawFNToRecFN = Module( new RoundAnyRawFNToRecFN( intAsRawFloat.expWidth, intWidth, expWidth, sigWidth, flRoundOpt_sigMSBitAlwaysZero | flRoundOpt_neverUnderflows )) roundAnyRawFNToRecFN.io.invalidExc := false.B roundAnyRawFNToRecFN.io.infiniteExc := false.B roundAnyRawFNToRecFN.io.in := intAsRawFloat roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess io.out := roundAnyRawFNToRecFN.io.out io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags } File rawFloatFromIN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). 
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ package hardfloat import chisel3._ import chisel3.util._ object rawFloatFromIN { def apply(signedIn: Bool, in: Bits): RawFloat = { val expWidth = log2Up(in.getWidth) + 1 //*** CHANGE THIS; CAN BE VERY LARGE: val extIntWidth = 1<<(expWidth - 1) val sign = signedIn && in(in.getWidth - 1) val absIn = Mux(sign, -in.asUInt, in.asUInt) val extAbsIn = (0.U(extIntWidth.W) ## absIn)(extIntWidth - 1, 0) val adjustedNormDist = countLeadingZeros(extAbsIn) val sig = (extAbsIn<<adjustedNormDist)( extIntWidth - 1, extIntWidth - in.getWidth) val out = Wire(new RawFloat(expWidth, in.getWidth)) out.isNaN := false.B out.isInf := false.B out.isZero := ! sig(in.getWidth - 1) out.sign := sign out.sExp := (2.U(2.W) ## ~adjustedNormDist(expWidth - 2, 0)).zext out.sig := sig out } }
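rawFloatFromIN is easiest to follow with a concrete value. The wrapper below is a sketch, not part of the original sources; it only exposes the raw-float fields for a 64-bit input so the hand trace in the comments can be checked in simulation.

import chisel3._
import hardfloat._

class RawFromInt64 extends Module {
  val io = IO(new Bundle {
    val signedIn = Input(Bool())
    val in       = Input(UInt(64.W))
    val isZero   = Output(Bool())
    val sign     = Output(Bool())
    val sExp     = Output(SInt(9.W))    // expWidth = log2Up(64) + 1 = 7, so sExp is 9 bits
    val sig      = Output(UInt(65.W))
  })
  val raw = rawFloatFromIN(io.signedIn, io.in)
  io.isZero := raw.isZero
  io.sign   := raw.sign
  io.sExp   := raw.sExp
  io.sig    := raw.sig
  // Hand trace for signedIn = true, in = 6:
  //   sign = 0, absIn = 6, extIntWidth = 64
  //   adjustedNormDist = countLeadingZeros(6) = 61
  //   sig  = 6 << 61, so only the top two bits are set and isZero = false
  //   sExp = (b10 ## ~61(5,0)) = b10000010 = 130, encoding 6 = 1.1 (binary) * 2^2
  //   (the same trace with in = 1 gives sExp = 128, i.e. exponent 0)
}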
module INToRecFN_i64_e11_s53_6( // @[INToRecFN.scala:43:7] input io_signedIn, // @[INToRecFN.scala:46:16] input [63:0] io_in, // @[INToRecFN.scala:46:16] input [2:0] io_roundingMode, // @[INToRecFN.scala:46:16] output [64:0] io_out, // @[INToRecFN.scala:46:16] output [4:0] io_exceptionFlags // @[INToRecFN.scala:46:16] ); wire io_signedIn_0 = io_signedIn; // @[INToRecFN.scala:43:7] wire [63:0] io_in_0 = io_in; // @[INToRecFN.scala:43:7] wire [2:0] io_roundingMode_0 = io_roundingMode; // @[INToRecFN.scala:43:7] wire intAsRawFloat_isNaN = 1'h0; // @[rawFloatFromIN.scala:59:23] wire intAsRawFloat_isInf = 1'h0; // @[rawFloatFromIN.scala:59:23] wire io_detectTininess = 1'h1; // @[INToRecFN.scala:43:7] wire [64:0] io_out_0; // @[INToRecFN.scala:43:7] wire [4:0] io_exceptionFlags_0; // @[INToRecFN.scala:43:7] wire _intAsRawFloat_sign_T = io_in_0[63]; // @[rawFloatFromIN.scala:51:34] wire intAsRawFloat_sign = io_signedIn_0 & _intAsRawFloat_sign_T; // @[rawFloatFromIN.scala:51:{29,34}] wire intAsRawFloat_sign_0 = intAsRawFloat_sign; // @[rawFloatFromIN.scala:51:29, :59:23] wire [64:0] _intAsRawFloat_absIn_T = 65'h0 - {1'h0, io_in_0}; // @[rawFloatFromIN.scala:52:31] wire [63:0] _intAsRawFloat_absIn_T_1 = _intAsRawFloat_absIn_T[63:0]; // @[rawFloatFromIN.scala:52:31] wire [63:0] intAsRawFloat_absIn = intAsRawFloat_sign ? _intAsRawFloat_absIn_T_1 : io_in_0; // @[rawFloatFromIN.scala:51:29, :52:{24,31}] wire [127:0] _intAsRawFloat_extAbsIn_T = {64'h0, intAsRawFloat_absIn}; // @[rawFloatFromIN.scala:52:24, :53:44] wire [63:0] intAsRawFloat_extAbsIn = _intAsRawFloat_extAbsIn_T[63:0]; // @[rawFloatFromIN.scala:53:{44,53}] wire _intAsRawFloat_adjustedNormDist_T = intAsRawFloat_extAbsIn[0]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_1 = intAsRawFloat_extAbsIn[1]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_2 = intAsRawFloat_extAbsIn[2]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_3 = intAsRawFloat_extAbsIn[3]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_4 = intAsRawFloat_extAbsIn[4]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_5 = intAsRawFloat_extAbsIn[5]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_6 = intAsRawFloat_extAbsIn[6]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_7 = intAsRawFloat_extAbsIn[7]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_8 = intAsRawFloat_extAbsIn[8]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_9 = intAsRawFloat_extAbsIn[9]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_10 = intAsRawFloat_extAbsIn[10]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_11 = intAsRawFloat_extAbsIn[11]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_12 = intAsRawFloat_extAbsIn[12]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_13 = intAsRawFloat_extAbsIn[13]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_14 = intAsRawFloat_extAbsIn[14]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_15 = intAsRawFloat_extAbsIn[15]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_16 = intAsRawFloat_extAbsIn[16]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_17 = intAsRawFloat_extAbsIn[17]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_18 = 
intAsRawFloat_extAbsIn[18]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_19 = intAsRawFloat_extAbsIn[19]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_20 = intAsRawFloat_extAbsIn[20]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_21 = intAsRawFloat_extAbsIn[21]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_22 = intAsRawFloat_extAbsIn[22]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_23 = intAsRawFloat_extAbsIn[23]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_24 = intAsRawFloat_extAbsIn[24]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_25 = intAsRawFloat_extAbsIn[25]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_26 = intAsRawFloat_extAbsIn[26]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_27 = intAsRawFloat_extAbsIn[27]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_28 = intAsRawFloat_extAbsIn[28]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_29 = intAsRawFloat_extAbsIn[29]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_30 = intAsRawFloat_extAbsIn[30]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_31 = intAsRawFloat_extAbsIn[31]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_32 = intAsRawFloat_extAbsIn[32]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_33 = intAsRawFloat_extAbsIn[33]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_34 = intAsRawFloat_extAbsIn[34]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_35 = intAsRawFloat_extAbsIn[35]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_36 = intAsRawFloat_extAbsIn[36]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_37 = intAsRawFloat_extAbsIn[37]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_38 = intAsRawFloat_extAbsIn[38]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_39 = intAsRawFloat_extAbsIn[39]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_40 = intAsRawFloat_extAbsIn[40]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_41 = intAsRawFloat_extAbsIn[41]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_42 = intAsRawFloat_extAbsIn[42]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_43 = intAsRawFloat_extAbsIn[43]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_44 = intAsRawFloat_extAbsIn[44]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_45 = intAsRawFloat_extAbsIn[45]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_46 = intAsRawFloat_extAbsIn[46]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_47 = intAsRawFloat_extAbsIn[47]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_48 = intAsRawFloat_extAbsIn[48]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_49 = intAsRawFloat_extAbsIn[49]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_50 = intAsRawFloat_extAbsIn[50]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_51 = intAsRawFloat_extAbsIn[51]; // @[rawFloatFromIN.scala:53:53] wire 
_intAsRawFloat_adjustedNormDist_T_52 = intAsRawFloat_extAbsIn[52]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_53 = intAsRawFloat_extAbsIn[53]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_54 = intAsRawFloat_extAbsIn[54]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_55 = intAsRawFloat_extAbsIn[55]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_56 = intAsRawFloat_extAbsIn[56]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_57 = intAsRawFloat_extAbsIn[57]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_58 = intAsRawFloat_extAbsIn[58]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_59 = intAsRawFloat_extAbsIn[59]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_60 = intAsRawFloat_extAbsIn[60]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_61 = intAsRawFloat_extAbsIn[61]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_62 = intAsRawFloat_extAbsIn[62]; // @[rawFloatFromIN.scala:53:53] wire _intAsRawFloat_adjustedNormDist_T_63 = intAsRawFloat_extAbsIn[63]; // @[rawFloatFromIN.scala:53:53] wire [5:0] _intAsRawFloat_adjustedNormDist_T_64 = {5'h1F, ~_intAsRawFloat_adjustedNormDist_T_1}; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_65 = _intAsRawFloat_adjustedNormDist_T_2 ? 6'h3D : _intAsRawFloat_adjustedNormDist_T_64; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_66 = _intAsRawFloat_adjustedNormDist_T_3 ? 6'h3C : _intAsRawFloat_adjustedNormDist_T_65; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_67 = _intAsRawFloat_adjustedNormDist_T_4 ? 6'h3B : _intAsRawFloat_adjustedNormDist_T_66; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_68 = _intAsRawFloat_adjustedNormDist_T_5 ? 6'h3A : _intAsRawFloat_adjustedNormDist_T_67; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_69 = _intAsRawFloat_adjustedNormDist_T_6 ? 6'h39 : _intAsRawFloat_adjustedNormDist_T_68; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_70 = _intAsRawFloat_adjustedNormDist_T_7 ? 6'h38 : _intAsRawFloat_adjustedNormDist_T_69; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_71 = _intAsRawFloat_adjustedNormDist_T_8 ? 6'h37 : _intAsRawFloat_adjustedNormDist_T_70; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_72 = _intAsRawFloat_adjustedNormDist_T_9 ? 6'h36 : _intAsRawFloat_adjustedNormDist_T_71; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_73 = _intAsRawFloat_adjustedNormDist_T_10 ? 6'h35 : _intAsRawFloat_adjustedNormDist_T_72; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_74 = _intAsRawFloat_adjustedNormDist_T_11 ? 6'h34 : _intAsRawFloat_adjustedNormDist_T_73; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_75 = _intAsRawFloat_adjustedNormDist_T_12 ? 6'h33 : _intAsRawFloat_adjustedNormDist_T_74; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_76 = _intAsRawFloat_adjustedNormDist_T_13 ? 6'h32 : _intAsRawFloat_adjustedNormDist_T_75; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_77 = _intAsRawFloat_adjustedNormDist_T_14 ? 6'h31 : _intAsRawFloat_adjustedNormDist_T_76; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_78 = _intAsRawFloat_adjustedNormDist_T_15 ? 
6'h30 : _intAsRawFloat_adjustedNormDist_T_77; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_79 = _intAsRawFloat_adjustedNormDist_T_16 ? 6'h2F : _intAsRawFloat_adjustedNormDist_T_78; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_80 = _intAsRawFloat_adjustedNormDist_T_17 ? 6'h2E : _intAsRawFloat_adjustedNormDist_T_79; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_81 = _intAsRawFloat_adjustedNormDist_T_18 ? 6'h2D : _intAsRawFloat_adjustedNormDist_T_80; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_82 = _intAsRawFloat_adjustedNormDist_T_19 ? 6'h2C : _intAsRawFloat_adjustedNormDist_T_81; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_83 = _intAsRawFloat_adjustedNormDist_T_20 ? 6'h2B : _intAsRawFloat_adjustedNormDist_T_82; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_84 = _intAsRawFloat_adjustedNormDist_T_21 ? 6'h2A : _intAsRawFloat_adjustedNormDist_T_83; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_85 = _intAsRawFloat_adjustedNormDist_T_22 ? 6'h29 : _intAsRawFloat_adjustedNormDist_T_84; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_86 = _intAsRawFloat_adjustedNormDist_T_23 ? 6'h28 : _intAsRawFloat_adjustedNormDist_T_85; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_87 = _intAsRawFloat_adjustedNormDist_T_24 ? 6'h27 : _intAsRawFloat_adjustedNormDist_T_86; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_88 = _intAsRawFloat_adjustedNormDist_T_25 ? 6'h26 : _intAsRawFloat_adjustedNormDist_T_87; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_89 = _intAsRawFloat_adjustedNormDist_T_26 ? 6'h25 : _intAsRawFloat_adjustedNormDist_T_88; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_90 = _intAsRawFloat_adjustedNormDist_T_27 ? 6'h24 : _intAsRawFloat_adjustedNormDist_T_89; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_91 = _intAsRawFloat_adjustedNormDist_T_28 ? 6'h23 : _intAsRawFloat_adjustedNormDist_T_90; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_92 = _intAsRawFloat_adjustedNormDist_T_29 ? 6'h22 : _intAsRawFloat_adjustedNormDist_T_91; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_93 = _intAsRawFloat_adjustedNormDist_T_30 ? 6'h21 : _intAsRawFloat_adjustedNormDist_T_92; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_94 = _intAsRawFloat_adjustedNormDist_T_31 ? 6'h20 : _intAsRawFloat_adjustedNormDist_T_93; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_95 = _intAsRawFloat_adjustedNormDist_T_32 ? 6'h1F : _intAsRawFloat_adjustedNormDist_T_94; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_96 = _intAsRawFloat_adjustedNormDist_T_33 ? 6'h1E : _intAsRawFloat_adjustedNormDist_T_95; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_97 = _intAsRawFloat_adjustedNormDist_T_34 ? 6'h1D : _intAsRawFloat_adjustedNormDist_T_96; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_98 = _intAsRawFloat_adjustedNormDist_T_35 ? 6'h1C : _intAsRawFloat_adjustedNormDist_T_97; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_99 = _intAsRawFloat_adjustedNormDist_T_36 ? 6'h1B : _intAsRawFloat_adjustedNormDist_T_98; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_100 = _intAsRawFloat_adjustedNormDist_T_37 ? 
6'h1A : _intAsRawFloat_adjustedNormDist_T_99; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_101 = _intAsRawFloat_adjustedNormDist_T_38 ? 6'h19 : _intAsRawFloat_adjustedNormDist_T_100; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_102 = _intAsRawFloat_adjustedNormDist_T_39 ? 6'h18 : _intAsRawFloat_adjustedNormDist_T_101; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_103 = _intAsRawFloat_adjustedNormDist_T_40 ? 6'h17 : _intAsRawFloat_adjustedNormDist_T_102; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_104 = _intAsRawFloat_adjustedNormDist_T_41 ? 6'h16 : _intAsRawFloat_adjustedNormDist_T_103; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_105 = _intAsRawFloat_adjustedNormDist_T_42 ? 6'h15 : _intAsRawFloat_adjustedNormDist_T_104; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_106 = _intAsRawFloat_adjustedNormDist_T_43 ? 6'h14 : _intAsRawFloat_adjustedNormDist_T_105; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_107 = _intAsRawFloat_adjustedNormDist_T_44 ? 6'h13 : _intAsRawFloat_adjustedNormDist_T_106; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_108 = _intAsRawFloat_adjustedNormDist_T_45 ? 6'h12 : _intAsRawFloat_adjustedNormDist_T_107; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_109 = _intAsRawFloat_adjustedNormDist_T_46 ? 6'h11 : _intAsRawFloat_adjustedNormDist_T_108; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_110 = _intAsRawFloat_adjustedNormDist_T_47 ? 6'h10 : _intAsRawFloat_adjustedNormDist_T_109; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_111 = _intAsRawFloat_adjustedNormDist_T_48 ? 6'hF : _intAsRawFloat_adjustedNormDist_T_110; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_112 = _intAsRawFloat_adjustedNormDist_T_49 ? 6'hE : _intAsRawFloat_adjustedNormDist_T_111; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_113 = _intAsRawFloat_adjustedNormDist_T_50 ? 6'hD : _intAsRawFloat_adjustedNormDist_T_112; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_114 = _intAsRawFloat_adjustedNormDist_T_51 ? 6'hC : _intAsRawFloat_adjustedNormDist_T_113; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_115 = _intAsRawFloat_adjustedNormDist_T_52 ? 6'hB : _intAsRawFloat_adjustedNormDist_T_114; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_116 = _intAsRawFloat_adjustedNormDist_T_53 ? 6'hA : _intAsRawFloat_adjustedNormDist_T_115; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_117 = _intAsRawFloat_adjustedNormDist_T_54 ? 6'h9 : _intAsRawFloat_adjustedNormDist_T_116; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_118 = _intAsRawFloat_adjustedNormDist_T_55 ? 6'h8 : _intAsRawFloat_adjustedNormDist_T_117; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_119 = _intAsRawFloat_adjustedNormDist_T_56 ? 6'h7 : _intAsRawFloat_adjustedNormDist_T_118; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_120 = _intAsRawFloat_adjustedNormDist_T_57 ? 6'h6 : _intAsRawFloat_adjustedNormDist_T_119; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_121 = _intAsRawFloat_adjustedNormDist_T_58 ? 6'h5 : _intAsRawFloat_adjustedNormDist_T_120; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_122 = _intAsRawFloat_adjustedNormDist_T_59 ? 
6'h4 : _intAsRawFloat_adjustedNormDist_T_121; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_123 = _intAsRawFloat_adjustedNormDist_T_60 ? 6'h3 : _intAsRawFloat_adjustedNormDist_T_122; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_124 = _intAsRawFloat_adjustedNormDist_T_61 ? 6'h2 : _intAsRawFloat_adjustedNormDist_T_123; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_adjustedNormDist_T_125 = _intAsRawFloat_adjustedNormDist_T_62 ? 6'h1 : _intAsRawFloat_adjustedNormDist_T_124; // @[Mux.scala:50:70] wire [5:0] intAsRawFloat_adjustedNormDist = _intAsRawFloat_adjustedNormDist_T_63 ? 6'h0 : _intAsRawFloat_adjustedNormDist_T_125; // @[Mux.scala:50:70] wire [5:0] _intAsRawFloat_out_sExp_T = intAsRawFloat_adjustedNormDist; // @[Mux.scala:50:70] wire [126:0] _intAsRawFloat_sig_T = {63'h0, intAsRawFloat_extAbsIn} << intAsRawFloat_adjustedNormDist; // @[Mux.scala:50:70] wire [63:0] intAsRawFloat_sig = _intAsRawFloat_sig_T[63:0]; // @[rawFloatFromIN.scala:56:{22,41}] wire _intAsRawFloat_out_isZero_T_1; // @[rawFloatFromIN.scala:62:23] wire [8:0] _intAsRawFloat_out_sExp_T_3; // @[rawFloatFromIN.scala:64:72] wire intAsRawFloat_isZero; // @[rawFloatFromIN.scala:59:23] wire [8:0] intAsRawFloat_sExp; // @[rawFloatFromIN.scala:59:23] wire [64:0] intAsRawFloat_sig_0; // @[rawFloatFromIN.scala:59:23] wire _intAsRawFloat_out_isZero_T = intAsRawFloat_sig[63]; // @[rawFloatFromIN.scala:56:41, :62:28] assign _intAsRawFloat_out_isZero_T_1 = ~_intAsRawFloat_out_isZero_T; // @[rawFloatFromIN.scala:62:{23,28}] assign intAsRawFloat_isZero = _intAsRawFloat_out_isZero_T_1; // @[rawFloatFromIN.scala:59:23, :62:23] wire [5:0] _intAsRawFloat_out_sExp_T_1 = ~_intAsRawFloat_out_sExp_T; // @[rawFloatFromIN.scala:64:{36,53}] wire [7:0] _intAsRawFloat_out_sExp_T_2 = {2'h2, _intAsRawFloat_out_sExp_T_1}; // @[rawFloatFromIN.scala:64:{33,36}] assign _intAsRawFloat_out_sExp_T_3 = {1'h0, _intAsRawFloat_out_sExp_T_2}; // @[rawFloatFromIN.scala:64:{33,72}] assign intAsRawFloat_sExp = _intAsRawFloat_out_sExp_T_3; // @[rawFloatFromIN.scala:59:23, :64:72] assign intAsRawFloat_sig_0 = {1'h0, intAsRawFloat_sig}; // @[rawFloatFromIN.scala:56:41, :59:23, :65:20] RoundAnyRawFNToRecFN_ie7_is64_oe11_os53_6 roundAnyRawFNToRecFN ( // @[INToRecFN.scala:60:15] .io_in_isZero (intAsRawFloat_isZero), // @[rawFloatFromIN.scala:59:23] .io_in_sign (intAsRawFloat_sign_0), // @[rawFloatFromIN.scala:59:23] .io_in_sExp (intAsRawFloat_sExp), // @[rawFloatFromIN.scala:59:23] .io_in_sig (intAsRawFloat_sig_0), // @[rawFloatFromIN.scala:59:23] .io_roundingMode (io_roundingMode_0), // @[INToRecFN.scala:43:7] .io_out (io_out_0), .io_exceptionFlags (io_exceptionFlags_0) ); // @[INToRecFN.scala:60:15] assign io_out = io_out_0; // @[INToRecFN.scala:43:7] assign io_exceptionFlags = io_exceptionFlags_0; // @[INToRecFN.scala:43:7] endmodule
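The long _intAsRawFloat_adjustedNormDist_T_* cascade above is countLeadingZeros unrolled into a priority mux that scans from the MSB downward. A compact way to express the same structure (a sketch, not taken from the hardfloat sources):

import chisel3._
import chisel3.util._

object CountLeadingZerosSketch {
  // Number of zero bits above the most significant set bit; returns width-1 for a
  // zero input, matching the default arm of the mux chain above.
  def apply(x: UInt): UInt =
    PriorityMux((x.getWidth - 1 to 0 by -1).map(i => (x(i), (x.getWidth - 1 - i).U)))
}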
Generate the Verilog code corresponding to the following Chisel files. File PE.scala: // See README.md for license details. package gemmini import chisel3._ import chisel3.util._ class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle { val dataflow = UInt(1.W) // TODO make this an Enum val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)? val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats } class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module { import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(inputType) val in_c = Input(cType) val out_d = Output(dType) }) io.out_d := io.in_c.mac(io.in_a, io.in_b) } // TODO update documentation /** * A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh. * @param width Data width of operands */ class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int) (implicit ev: Arithmetic[T]) extends Module { // Debugging variables import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(outputType) val in_d = Input(outputType) val out_a = Output(inputType) val out_b = Output(outputType) val out_c = Output(outputType) val in_control = Input(new PEControl(accType)) val out_control = Output(new PEControl(accType)) val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W)) val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W)) val in_last = Input(Bool()) val out_last = Output(Bool()) val in_valid = Input(Bool()) val out_valid = Output(Bool()) val bad_dataflow = Output(Bool()) }) val cType = if (df == Dataflow.WS) inputType else accType // When creating PEs that support multiple dataflows, the // elaboration/synthesis tools often fail to consolidate and de-duplicate // MAC units. To force mac circuitry to be re-used, we create a "mac_unit" // module here which just performs a single MAC operation val mac_unit = Module(new MacUnit(inputType, if (df == Dataflow.WS) outputType else accType, outputType)) val a = io.in_a val b = io.in_b val d = io.in_d val c1 = Reg(cType) val c2 = Reg(cType) val dataflow = io.in_control.dataflow val prop = io.in_control.propagate val shift = io.in_control.shift val id = io.in_id val last = io.in_last val valid = io.in_valid io.out_a := a io.out_control.dataflow := dataflow io.out_control.propagate := prop io.out_control.shift := shift io.out_id := id io.out_last := last io.out_valid := valid mac_unit.io.in_a := a val last_s = RegEnable(prop, valid) val flip = last_s =/= prop val shift_offset = Mux(flip, shift, 0.U) // Which dataflow are we using? val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W) val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W) // Is c1 being computed on, or propagated forward (in the output-stationary dataflow)? 
val COMPUTE = 0.U(1.W) val PROPAGATE = 1.U(1.W) io.bad_dataflow := false.B when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 c2 := mac_unit.io.out_d c1 := d.withWidthOf(cType) }.otherwise { io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c1 c1 := mac_unit.io.out_d c2 := d.withWidthOf(cType) } }.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := c1 mac_unit.io.in_b := c2.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c1 := d }.otherwise { io.out_c := c2 mac_unit.io.in_b := c1.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c2 := d } }.otherwise { io.bad_dataflow := true.B //assert(false.B, "unknown dataflow") io.out_c := DontCare io.out_b := DontCare mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 } when (!valid) { c1 := c1 c2 := c2 mac_unit.io.in_b := DontCare mac_unit.io.in_c := DontCare } } File Arithmetic.scala: // A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own: // implicit MyTypeArithmetic extends Arithmetic[MyType] { ... } package gemmini import chisel3._ import chisel3.util._ import hardfloat._ // Bundles that represent the raw bits of custom datatypes case class Float(expWidth: Int, sigWidth: Int) extends Bundle { val bits = UInt((expWidth + sigWidth).W) val bias: Int = (1 << (expWidth-1)) - 1 } case class DummySInt(w: Int) extends Bundle { val bits = UInt(w.W) def dontCare: DummySInt = { val o = Wire(new DummySInt(w)) o.bits := 0.U o } } // The Arithmetic typeclass which implements various arithmetic operations on custom datatypes abstract class Arithmetic[T <: Data] { implicit def cast(t: T): ArithmeticOps[T] } abstract class ArithmeticOps[T <: Data](self: T) { def *(t: T): T def mac(m1: T, m2: T): T // Returns (m1 * m2 + self) def +(t: T): T def -(t: T): T def >>(u: UInt): T // This is a rounding shift! Rounds away from 0 def >(t: T): Bool def identity: T def withWidthOf(t: T): T def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates def relu: T def zero: T def minimum: T // Optional parameters, which only need to be defined if you want to enable various optimizations for transformers def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None def mult_with_reciprocal[U <: Data](reciprocal: U) = self } object Arithmetic { implicit object UIntArithmetic extends Arithmetic[UInt] { override implicit def cast(self: UInt) = new ArithmeticOps(self) { override def *(t: UInt) = self * t override def mac(m1: UInt, m2: UInt) = m1 * m2 + self override def +(t: UInt) = self + t override def -(t: UInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? 
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = point_five & (zeros | ones_digit) (self >> u).asUInt + r } override def >(t: UInt): Bool = self > t override def withWidthOf(t: UInt) = self.asTypeOf(t) override def clippedToWidthOf(t: UInt) = { val sat = ((1 << (t.getWidth-1))-1).U Mux(self > sat, sat, self)(t.getWidth-1, 0) } override def relu: UInt = self override def zero: UInt = 0.U override def identity: UInt = 1.U override def minimum: UInt = 0.U } } implicit object SIntArithmetic extends Arithmetic[SInt] { override implicit def cast(self: SInt) = new ArithmeticOps(self) { override def *(t: SInt) = self * t override def mac(m1: SInt, m2: SInt) = m1 * m2 + self override def +(t: SInt) = self + t override def -(t: SInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = (point_five & (zeros | ones_digit)).asBool (self >> u).asSInt + Mux(r, 1.S, 0.S) } override def >(t: SInt): Bool = self > t override def withWidthOf(t: SInt) = { if (self.getWidth >= t.getWidth) self(t.getWidth-1, 0).asSInt else { val sign_bits = t.getWidth - self.getWidth val sign = self(self.getWidth-1) Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t) } } override def clippedToWidthOf(t: SInt): SInt = { val maxsat = ((1 << (t.getWidth-1))-1).S val minsat = (-(1 << (t.getWidth-1))).S MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt } override def relu: SInt = Mux(self >= 0.S, self, 0.S) override def zero: SInt = 0.S override def identity: SInt = 1.S override def minimum: SInt = (-(1 << (self.getWidth-1))).S override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(denom_t.cloneType)) val output = Wire(Decoupled(self.cloneType)) // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def sin_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def uin_to_float(x: UInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := x in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = 
sin_to_float(self) val denom_rec = uin_to_float(input.bits) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := self_rec divider.io.b := denom_rec divider.io.roundingMode := consts.round_minMag divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := float_to_in(divider.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(self.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) // Instantiate the hardloat sqrt val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0)) input.ready := sqrter.io.inReady sqrter.io.inValid := input.valid sqrter.io.sqrtOp := true.B sqrter.io.a := self_rec sqrter.io.b := DontCare sqrter.io.roundingMode := consts.round_minMag sqrter.io.detectTininess := consts.tininess_afterRounding output.valid := sqrter.io.outValid_sqrt output.bits := float_to_in(sqrter.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match { case Float(expWidth, sigWidth) => val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(u.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } val self_rec = in_to_float(self) val one_rec = in_to_float(1.S) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := one_rec divider.io.b := self_rec divider.io.roundingMode := consts.round_near_even divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u) assert(!output.valid || output.ready) Some((input, output)) case _ => None } override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match 
{ case recip @ Float(expWidth, sigWidth) => def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits) // Instantiate the hardloat divider val muladder = Module(new MulRecFN(expWidth, sigWidth)) muladder.io.roundingMode := consts.round_near_even muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := reciprocal_rec float_to_in(muladder.io.out) case _ => self } } } implicit object FloatArithmetic extends Arithmetic[Float] { // TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) { override def *(t: Float): Float = { val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := t_rec_resized val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def mac(m1: Float, m2: Float): Float = { // Recode all operands val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits) val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize m1 to self's width val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth)) m1_resizer.io.in := m1_rec m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m1_resizer.io.detectTininess := consts.tininess_afterRounding val m1_rec_resized = m1_resizer.io.out // Resize m2 to self's width val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth)) m2_resizer.io.in := m2_rec m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m2_resizer.io.detectTininess := consts.tininess_afterRounding val m2_rec_resized = m2_resizer.io.out // Perform multiply-add val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := m1_rec_resized muladder.io.b := m2_rec_resized muladder.io.c := self_rec // Convert result 
to standard format // TODO remove these intermediate recodings val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def +(t: Float): Float = { require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Generate 1 as a float val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := 1.U in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding val one_rec = in_to_rec_fn.io.out // Resize t val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out // Perform addition val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := t_rec_resized muladder.io.b := one_rec muladder.io.c := self_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def -(t: Float): Float = { val t_sgn = t.bits(t.getWidth-1) val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t) self + neg_t } override def >>(u: UInt): Float = { // Recode self val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Get 2^(-u) as a recoded float val shift_exp = Wire(UInt(self.expWidth.W)) shift_exp := self.bias.U - u val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W)) val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn) assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported") // Multiply self and 2^(-u) val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := shift_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def >(t: Float): Bool = { // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize t to self's width val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth)) comparator.io.a := self_rec comparator.io.b := t_rec_resized comparator.io.signaling := false.B comparator.io.gt } override def withWidthOf(t: Float): Float = { val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even 
// consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def clippedToWidthOf(t: Float): Float = { // TODO check for overflow. Right now, we just assume that overflow doesn't happen val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def relu: Float = { val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits) val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits) result } override def zero: Float = 0.U.asTypeOf(self) override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) } } implicit object DummySIntArithmetic extends Arithmetic[DummySInt] { override implicit def cast(self: DummySInt) = new ArithmeticOps(self) { override def *(t: DummySInt) = self.dontCare override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare override def +(t: DummySInt) = self.dontCare override def -(t: DummySInt) = self.dontCare override def >>(t: UInt) = self.dontCare override def >(t: DummySInt): Bool = false.B override def identity = self.dontCare override def withWidthOf(t: DummySInt) = self.dontCare override def clippedToWidthOf(t: DummySInt) = self.dontCare override def relu = self.dontCare override def zero = self.dontCare override def minimum: DummySInt = self.dontCare } } }
module MacUnit_247( // @[PE.scala:14:7] input clock, // @[PE.scala:14:7] input reset, // @[PE.scala:14:7] input [7:0] io_in_a, // @[PE.scala:16:14] input [7:0] io_in_b, // @[PE.scala:16:14] input [19:0] io_in_c, // @[PE.scala:16:14] output [19:0] io_out_d // @[PE.scala:16:14] ); wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:14:7] wire [7:0] io_in_b_0 = io_in_b; // @[PE.scala:14:7] wire [19:0] io_in_c_0 = io_in_c; // @[PE.scala:14:7] wire [19:0] _io_out_d_T_3; // @[Arithmetic.scala:93:54] wire [19:0] io_out_d_0; // @[PE.scala:14:7] wire [15:0] _io_out_d_T = {{8{io_in_a_0[7]}}, io_in_a_0} * {{8{io_in_b_0[7]}}, io_in_b_0}; // @[PE.scala:14:7] wire [20:0] _io_out_d_T_1 = {{5{_io_out_d_T[15]}}, _io_out_d_T} + {io_in_c_0[19], io_in_c_0}; // @[PE.scala:14:7] wire [19:0] _io_out_d_T_2 = _io_out_d_T_1[19:0]; // @[Arithmetic.scala:93:54] assign _io_out_d_T_3 = _io_out_d_T_2; // @[Arithmetic.scala:93:54] assign io_out_d_0 = _io_out_d_T_3; // @[PE.scala:14:7] assign io_out_d = io_out_d_0; // @[PE.scala:14:7] endmodule
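For reference, the datapath above is a purely combinational signed multiply-accumulate: the 8-bit operands are sign-extended, multiplied, added to the 20-bit accumulator input with one extra carry bit, and the result is truncated back to 20 bits. A minimal stand-alone Chisel sketch of the same computation (the module and port names here are illustrative, not taken from the generated design):

import chisel3._

// Hypothetical sketch of the MAC datapath emitted as MacUnit_247:
// sext(a) * sext(b) + c, with the carry bit of the wide add dropped.
class MacSketch extends Module {
  val io = IO(new Bundle {
    val a = Input(SInt(8.W))    // corresponds to io_in_a
    val b = Input(SInt(8.W))    // corresponds to io_in_b
    val c = Input(SInt(20.W))   // corresponds to io_in_c
    val d = Output(SInt(20.W))  // corresponds to io_out_d
  })
  // SInt '*' sign-extends both operands; '+&' keeps the carry, and the
  // final (19, 0) extraction drops it, matching _io_out_d_T_2[19:0].
  io.d := ((io.a * io.b) +& io.c)(19, 0).asSInt
}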
Generate the Verilog code corresponding to the following Chisel files. File RoundAnyRawFNToRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
=============================================================================*/ package hardfloat import chisel3._ import chisel3.util.Fill import consts._ //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class RoundAnyRawFNToRecFN( inExpWidth: Int, inSigWidth: Int, outExpWidth: Int, outSigWidth: Int, options: Int ) extends RawModule { override def desiredName = s"RoundAnyRawFNToRecFN_ie${inExpWidth}_is${inSigWidth}_oe${outExpWidth}_os${outSigWidth}" val io = IO(new Bundle { val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in' val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign' val in = Input(new RawFloat(inExpWidth, inSigWidth)) // (allowed exponent range has limits) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((outExpWidth + outSigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val sigMSBitAlwaysZero = ((options & flRoundOpt_sigMSBitAlwaysZero) != 0) val effectiveInSigWidth = if (sigMSBitAlwaysZero) inSigWidth else inSigWidth + 1 val neverUnderflows = ((options & (flRoundOpt_neverUnderflows | flRoundOpt_subnormsAlwaysExact) ) != 0) || (inExpWidth < outExpWidth) val neverOverflows = ((options & flRoundOpt_neverOverflows) != 0) || (inExpWidth < outExpWidth) val outNaNExp = BigInt(7)<<(outExpWidth - 2) val outInfExp = BigInt(6)<<(outExpWidth - 2) val outMaxFiniteExp = outInfExp - 1 val outMinNormExp = (BigInt(1)<<(outExpWidth - 1)) + 2 val outMinNonzeroExp = outMinNormExp - outSigWidth + 1 //------------------------------------------------------------------------ //------------------------------------------------------------------------ val roundingMode_near_even = (io.roundingMode === round_near_even) val roundingMode_minMag = (io.roundingMode === round_minMag) val roundingMode_min = (io.roundingMode === round_min) val roundingMode_max = (io.roundingMode === round_max) val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag) val roundingMode_odd = (io.roundingMode === round_odd) val roundMagUp = (roundingMode_min && io.in.sign) || (roundingMode_max && ! 
io.in.sign) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val sAdjustedExp = if (inExpWidth < outExpWidth) (io.in.sExp +& ((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S )(outExpWidth, 0).zext else if (inExpWidth == outExpWidth) io.in.sExp else io.in.sExp +& ((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S val adjustedSig = if (inSigWidth <= outSigWidth + 2) io.in.sig<<(outSigWidth - inSigWidth + 2) else (io.in.sig(inSigWidth, inSigWidth - outSigWidth - 1) ## io.in.sig(inSigWidth - outSigWidth - 2, 0).orR ) val doShiftSigDown1 = if (sigMSBitAlwaysZero) false.B else adjustedSig(outSigWidth + 2) val common_expOut = Wire(UInt((outExpWidth + 1).W)) val common_fractOut = Wire(UInt((outSigWidth - 1).W)) val common_overflow = Wire(Bool()) val common_totalUnderflow = Wire(Bool()) val common_underflow = Wire(Bool()) val common_inexact = Wire(Bool()) if ( neverOverflows && neverUnderflows && (effectiveInSigWidth <= outSigWidth) ) { //-------------------------------------------------------------------- //-------------------------------------------------------------------- common_expOut := sAdjustedExp(outExpWidth, 0) + doShiftSigDown1 common_fractOut := Mux(doShiftSigDown1, adjustedSig(outSigWidth + 1, 3), adjustedSig(outSigWidth, 2) ) common_overflow := false.B common_totalUnderflow := false.B common_underflow := false.B common_inexact := false.B } else { //-------------------------------------------------------------------- //-------------------------------------------------------------------- val roundMask = if (neverUnderflows) 0.U(outSigWidth.W) ## doShiftSigDown1 ## 3.U(2.W) else (lowMask( sAdjustedExp(outExpWidth, 0), outMinNormExp - outSigWidth - 1, outMinNormExp ) | doShiftSigDown1) ## 3.U(2.W) val shiftedRoundMask = 0.U(1.W) ## roundMask>>1 val roundPosMask = ~shiftedRoundMask & roundMask val roundPosBit = (adjustedSig & roundPosMask).orR val anyRoundExtra = (adjustedSig & shiftedRoundMask).orR val anyRound = roundPosBit || anyRoundExtra val roundIncr = ((roundingMode_near_even || roundingMode_near_maxMag) && roundPosBit) || (roundMagUp && anyRound) val roundedSig: Bits = Mux(roundIncr, (((adjustedSig | roundMask)>>2) +& 1.U) & ~Mux(roundingMode_near_even && roundPosBit && ! anyRoundExtra, roundMask>>1, 0.U((outSigWidth + 2).W) ), (adjustedSig & ~roundMask)>>2 | Mux(roundingMode_odd && anyRound, roundPosMask>>1, 0.U) ) //*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING //*** M.S. BIT OF SUBNORMAL SIG? 
val sRoundedExp = sAdjustedExp +& (roundedSig>>outSigWidth).asUInt.zext common_expOut := sRoundedExp(outExpWidth, 0) common_fractOut := Mux(doShiftSigDown1, roundedSig(outSigWidth - 1, 1), roundedSig(outSigWidth - 2, 0) ) common_overflow := (if (neverOverflows) false.B else //*** REWRITE BASED ON BEFORE-ROUNDING EXPONENT?: (sRoundedExp>>(outExpWidth - 1) >= 3.S)) common_totalUnderflow := (if (neverUnderflows) false.B else //*** WOULD BE GOOD ENOUGH TO USE EXPONENT BEFORE ROUNDING?: (sRoundedExp < outMinNonzeroExp.S)) val unboundedRange_roundPosBit = Mux(doShiftSigDown1, adjustedSig(2), adjustedSig(1)) val unboundedRange_anyRound = (doShiftSigDown1 && adjustedSig(2)) || adjustedSig(1, 0).orR val unboundedRange_roundIncr = ((roundingMode_near_even || roundingMode_near_maxMag) && unboundedRange_roundPosBit) || (roundMagUp && unboundedRange_anyRound) val roundCarry = Mux(doShiftSigDown1, roundedSig(outSigWidth + 1), roundedSig(outSigWidth) ) common_underflow := (if (neverUnderflows) false.B else common_totalUnderflow || //*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING //*** M.S. BIT OF SUBNORMAL SIG? (anyRound && ((sAdjustedExp>>outExpWidth) <= 0.S) && Mux(doShiftSigDown1, roundMask(3), roundMask(2)) && ! ((io.detectTininess === tininess_afterRounding) && ! Mux(doShiftSigDown1, roundMask(4), roundMask(3) ) && roundCarry && roundPosBit && unboundedRange_roundIncr))) common_inexact := common_totalUnderflow || anyRound } //------------------------------------------------------------------------ //------------------------------------------------------------------------ val isNaNOut = io.invalidExc || io.in.isNaN val notNaN_isSpecialInfOut = io.infiniteExc || io.in.isInf val commonCase = ! isNaNOut && ! notNaN_isSpecialInfOut && ! io.in.isZero val overflow = commonCase && common_overflow val underflow = commonCase && common_underflow val inexact = overflow || (commonCase && common_inexact) val overflow_roundMagUp = roundingMode_near_even || roundingMode_near_maxMag || roundMagUp val pegMinNonzeroMagOut = commonCase && common_totalUnderflow && (roundMagUp || roundingMode_odd) val pegMaxFiniteMagOut = overflow && ! 
overflow_roundMagUp val notNaN_isInfOut = notNaN_isSpecialInfOut || (overflow && overflow_roundMagUp) val signOut = Mux(isNaNOut, false.B, io.in.sign) val expOut = (common_expOut & ~Mux(io.in.isZero || common_totalUnderflow, (BigInt(7)<<(outExpWidth - 2)).U((outExpWidth + 1).W), 0.U ) & ~Mux(pegMinNonzeroMagOut, ~outMinNonzeroExp.U((outExpWidth + 1).W), 0.U ) & ~Mux(pegMaxFiniteMagOut, (BigInt(1)<<(outExpWidth - 1)).U((outExpWidth + 1).W), 0.U ) & ~Mux(notNaN_isInfOut, (BigInt(1)<<(outExpWidth - 2)).U((outExpWidth + 1).W), 0.U )) | Mux(pegMinNonzeroMagOut, outMinNonzeroExp.U((outExpWidth + 1).W), 0.U ) | Mux(pegMaxFiniteMagOut, outMaxFiniteExp.U((outExpWidth + 1).W), 0.U ) | Mux(notNaN_isInfOut, outInfExp.U((outExpWidth + 1).W), 0.U) | Mux(isNaNOut, outNaNExp.U((outExpWidth + 1).W), 0.U) val fractOut = Mux(isNaNOut || io.in.isZero || common_totalUnderflow, Mux(isNaNOut, (BigInt(1)<<(outSigWidth - 2)).U, 0.U), common_fractOut ) | Fill(outSigWidth - 1, pegMaxFiniteMagOut) io.out := signOut ## expOut ## fractOut io.exceptionFlags := io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class RoundRawFNToRecFN(expWidth: Int, sigWidth: Int, options: Int) extends RawModule { override def desiredName = s"RoundRawFNToRecFN_e${expWidth}_s${sigWidth}" val io = IO(new Bundle { val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in' val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign' val in = Input(new RawFloat(expWidth, sigWidth + 2)) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((expWidth + sigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) val roundAnyRawFNToRecFN = Module( new RoundAnyRawFNToRecFN( expWidth, sigWidth + 2, expWidth, sigWidth, options)) roundAnyRawFNToRecFN.io.invalidExc := io.invalidExc roundAnyRawFNToRecFN.io.infiniteExc := io.infiniteExc roundAnyRawFNToRecFN.io.in := io.in roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess io.out := roundAnyRawFNToRecFN.io.out io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags }
module RoundRawFNToRecFN_e8_s24_69( // @[RoundAnyRawFNToRecFN.scala:295:5] input io_invalidExc, // @[RoundAnyRawFNToRecFN.scala:299:16] input io_in_isNaN, // @[RoundAnyRawFNToRecFN.scala:299:16] input io_in_isInf, // @[RoundAnyRawFNToRecFN.scala:299:16] input io_in_isZero, // @[RoundAnyRawFNToRecFN.scala:299:16] input io_in_sign, // @[RoundAnyRawFNToRecFN.scala:299:16] input [9:0] io_in_sExp, // @[RoundAnyRawFNToRecFN.scala:299:16] input [26:0] io_in_sig, // @[RoundAnyRawFNToRecFN.scala:299:16] output [32:0] io_out, // @[RoundAnyRawFNToRecFN.scala:299:16] output [4:0] io_exceptionFlags // @[RoundAnyRawFNToRecFN.scala:299:16] ); wire io_invalidExc_0 = io_invalidExc; // @[RoundAnyRawFNToRecFN.scala:295:5] wire io_in_isNaN_0 = io_in_isNaN; // @[RoundAnyRawFNToRecFN.scala:295:5] wire io_in_isInf_0 = io_in_isInf; // @[RoundAnyRawFNToRecFN.scala:295:5] wire io_in_isZero_0 = io_in_isZero; // @[RoundAnyRawFNToRecFN.scala:295:5] wire io_in_sign_0 = io_in_sign; // @[RoundAnyRawFNToRecFN.scala:295:5] wire [9:0] io_in_sExp_0 = io_in_sExp; // @[RoundAnyRawFNToRecFN.scala:295:5] wire [26:0] io_in_sig_0 = io_in_sig; // @[RoundAnyRawFNToRecFN.scala:295:5] wire io_detectTininess = 1'h1; // @[RoundAnyRawFNToRecFN.scala:295:5, :299:16, :310:15] wire [2:0] io_roundingMode = 3'h0; // @[RoundAnyRawFNToRecFN.scala:295:5, :299:16, :310:15] wire io_infiniteExc = 1'h0; // @[RoundAnyRawFNToRecFN.scala:295:5, :299:16, :310:15] wire [32:0] io_out_0; // @[RoundAnyRawFNToRecFN.scala:295:5] wire [4:0] io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:295:5] RoundAnyRawFNToRecFN_ie8_is26_oe8_os24_69 roundAnyRawFNToRecFN ( // @[RoundAnyRawFNToRecFN.scala:310:15] .io_invalidExc (io_invalidExc_0), // @[RoundAnyRawFNToRecFN.scala:295:5] .io_in_isNaN (io_in_isNaN_0), // @[RoundAnyRawFNToRecFN.scala:295:5] .io_in_isInf (io_in_isInf_0), // @[RoundAnyRawFNToRecFN.scala:295:5] .io_in_isZero (io_in_isZero_0), // @[RoundAnyRawFNToRecFN.scala:295:5] .io_in_sign (io_in_sign_0), // @[RoundAnyRawFNToRecFN.scala:295:5] .io_in_sExp (io_in_sExp_0), // @[RoundAnyRawFNToRecFN.scala:295:5] .io_in_sig (io_in_sig_0), // @[RoundAnyRawFNToRecFN.scala:295:5] .io_out (io_out_0), .io_exceptionFlags (io_exceptionFlags_0) ); // @[RoundAnyRawFNToRecFN.scala:310:15] assign io_out = io_out_0; // @[RoundAnyRawFNToRecFN.scala:295:5] assign io_exceptionFlags = io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:295:5] endmodule
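As a usage note, RoundRawFNToRecFN is a thin wrapper around RoundAnyRawFNToRecFN with matching input and output exponent/significand widths, and this particular instance is driven with a constant round_near_even rounding mode and tininess_afterRounding. A hedged sketch of how such an instance might be hooked up in Chisel (the enclosing module and its ports are hypothetical; the rounder's field names come from the io bundle shown above):

import chisel3._
import hardfloat._
import hardfloat.consts._

// Hypothetical wrapper: feed a raw float (expWidth = 8, sigWidth + 2 = 26)
// into RoundRawFNToRecFN(8, 24, 0) and expose the 33-bit recoded result
// plus the 5 exception flags.
class RoundExample extends Module {
  val io = IO(new Bundle {
    val invalidExc = Input(Bool())
    val in         = Input(new RawFloat(8, 26))
    val out        = Output(UInt(33.W))   // expWidth + sigWidth + 1
    val flags      = Output(UInt(5.W))
  })
  val rounder = Module(new RoundRawFNToRecFN(8, 24, 0))
  rounder.io.invalidExc     := io.invalidExc
  rounder.io.infiniteExc    := false.B
  rounder.io.in             := io.in
  rounder.io.roundingMode   := round_near_even
  rounder.io.detectTininess := tininess_afterRounding
  io.out   := rounder.io.out
  io.flags := rounder.io.exceptionFlags
}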
Generate the Verilog code corresponding to the following Chisel files. File Transposer.scala: package gemmini import chisel3._ import chisel3.util._ import Util._ trait Transposer[T <: Data] extends Module { def dim: Int def dataType: T val io = IO(new Bundle { val inRow = Flipped(Decoupled(Vec(dim, dataType))) val outCol = Decoupled(Vec(dim, dataType)) }) } class PipelinedTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] { require(isPow2(dim)) val regArray = Seq.fill(dim, dim)(Reg(dataType)) val regArrayT = regArray.transpose val sMoveUp :: sMoveLeft :: Nil = Enum(2) val state = RegInit(sMoveUp) val leftCounter = RegInit(0.U(log2Ceil(dim+1).W)) //(io.inRow.fire && state === sMoveLeft, dim+1) val upCounter = RegInit(0.U(log2Ceil(dim+1).W)) //Counter(io.inRow.fire && state === sMoveUp, dim+1) io.outCol.valid := 0.U io.inRow.ready := 0.U switch(state) { is(sMoveUp) { io.inRow.ready := upCounter <= dim.U io.outCol.valid := leftCounter > 0.U when(io.inRow.fire) { upCounter := upCounter + 1.U } when(upCounter === (dim-1).U) { state := sMoveLeft leftCounter := 0.U } when(io.outCol.fire) { leftCounter := leftCounter - 1.U } } is(sMoveLeft) { io.inRow.ready := leftCounter <= dim.U // TODO: this is naive io.outCol.valid := upCounter > 0.U when(leftCounter === (dim-1).U) { state := sMoveUp } when(io.inRow.fire) { leftCounter := leftCounter + 1.U upCounter := 0.U } when(io.outCol.fire) { upCounter := upCounter - 1.U } } } // Propagate input from bottom row to top row systolically in the move up phase // TODO: need to iterate over columns to connect Chisel values of type T // Should be able to operate directly on the Vec, but Seq and Vec don't mix (try Array?) for (colIdx <- 0 until dim) { regArray.foldRight(io.inRow.bits(colIdx)) { case (regRow, prevReg) => when (state === sMoveUp) { regRow(colIdx) := prevReg } regRow(colIdx) } } // Propagate input from right side to left side systolically in the move left phase for (rowIdx <- 0 until dim) { regArrayT.foldRight(io.inRow.bits(rowIdx)) { case (regCol, prevReg) => when (state === sMoveLeft) { regCol(rowIdx) := prevReg } regCol(rowIdx) } } // Pull from the left side or the top side based on the state for (idx <- 0 until dim) { when (state === sMoveUp) { io.outCol.bits(idx) := regArray(0)(idx) }.elsewhen(state === sMoveLeft) { io.outCol.bits(idx) := regArrayT(0)(idx) }.otherwise { io.outCol.bits(idx) := DontCare } } } class AlwaysOutTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] { require(isPow2(dim)) val LEFT_DIR = 0.U(1.W) val UP_DIR = 1.U(1.W) class PE extends Module { val io = IO(new Bundle { val inR = Input(dataType) val inD = Input(dataType) val outL = Output(dataType) val outU = Output(dataType) val dir = Input(UInt(1.W)) val en = Input(Bool()) }) val reg = RegEnable(Mux(io.dir === LEFT_DIR, io.inR, io.inD), io.en) io.outU := reg io.outL := reg } val pes = Seq.fill(dim,dim)(Module(new PE)) val counter = RegInit(0.U((log2Ceil(dim) max 1).W)) // TODO replace this with a standard Chisel counter val dir = RegInit(LEFT_DIR) // Wire up horizontal signals for (row <- 0 until dim; col <- 0 until dim) { val right_in = if (col == dim-1) io.inRow.bits(row) else pes(row)(col+1).io.outL pes(row)(col).io.inR := right_in } // Wire up vertical signals for (row <- 0 until dim; col <- 0 until dim) { val down_in = if (row == dim-1) io.inRow.bits(col) else pes(row+1)(col).io.outU pes(row)(col).io.inD := down_in } // Wire up global signals pes.flatten.foreach(_.io.dir := dir) pes.flatten.foreach(_.io.en := 
io.inRow.fire) io.outCol.valid := true.B io.inRow.ready := true.B val left_out = VecInit(pes.transpose.head.map(_.io.outL)) val up_out = VecInit(pes.head.map(_.io.outU)) io.outCol.bits := Mux(dir === LEFT_DIR, left_out, up_out) when (io.inRow.fire) { counter := wrappingAdd(counter, 1.U, dim) } when (counter === (dim-1).U && io.inRow.fire) { dir := ~dir } } class NaiveTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] { val regArray = Seq.fill(dim, dim)(Reg(dataType)) val regArrayT = regArray.transpose // state = 0 => filling regArray row-wise, state = 1 => draining regArray column-wise val state = RegInit(0.U(1.W)) val countInc = io.inRow.fire || io.outCol.fire val (countValue, countWrap) = Counter(countInc, dim) io.inRow.ready := state === 0.U io.outCol.valid := state === 1.U for (i <- 0 until dim) { for (j <- 0 until dim) { when(countValue === i.U && io.inRow.fire) { regArray(i)(j) := io.inRow.bits(j) } } } for (i <- 0 until dim) { io.outCol.bits(i) := 0.U for (j <- 0 until dim) { when(countValue === j.U) { io.outCol.bits(i) := regArrayT(j)(i) } } } when (io.inRow.fire && countWrap) { state := 1.U } when (io.outCol.fire && countWrap) { state := 0.U } assert(!(state === 0.U) || !io.outCol.fire) assert(!(state === 1.U) || !io.inRow.fire) }
module PE_254( // @[Transposer.scala:100:9] input clock, // @[Transposer.scala:100:9] input reset, // @[Transposer.scala:100:9] input [7:0] io_inR, // @[Transposer.scala:101:16] input [7:0] io_inD, // @[Transposer.scala:101:16] output [7:0] io_outL, // @[Transposer.scala:101:16] output [7:0] io_outU, // @[Transposer.scala:101:16] input io_dir, // @[Transposer.scala:101:16] input io_en // @[Transposer.scala:101:16] ); wire [7:0] io_inR_0 = io_inR; // @[Transposer.scala:100:9] wire [7:0] io_inD_0 = io_inD; // @[Transposer.scala:100:9] wire io_dir_0 = io_dir; // @[Transposer.scala:100:9] wire io_en_0 = io_en; // @[Transposer.scala:100:9] wire [7:0] io_outL_0; // @[Transposer.scala:100:9] wire [7:0] io_outU_0; // @[Transposer.scala:100:9] wire _reg_T = ~io_dir_0; // @[Transposer.scala:100:9, :110:36] wire [7:0] _reg_T_1 = _reg_T ? io_inR_0 : io_inD_0; // @[Transposer.scala:100:9, :110:{28,36}] reg [7:0] reg_0; // @[Transposer.scala:110:24] assign io_outL_0 = reg_0; // @[Transposer.scala:100:9, :110:24] assign io_outU_0 = reg_0; // @[Transposer.scala:100:9, :110:24] always @(posedge clock) begin // @[Transposer.scala:100:9] if (io_en_0) // @[Transposer.scala:100:9] reg_0 <= _reg_T_1; // @[Transposer.scala:110:{24,28}] end // always @(posedge) assign io_outL = io_outL_0; // @[Transposer.scala:100:9] assign io_outU = io_outU_0; // @[Transposer.scala:100:9] endmodule
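The lone register in PE_254 is the RegEnable from the PE class above: because LEFT_DIR is encoded as 0, the emitted mux selects io_inR when ~io_dir is true and io_inD otherwise, and it only updates while io_en is asserted. A hedged sketch of driving a full transposer built from these PEs through the Decoupled interface declared in the Transposer trait (the harness, dimension, and data values are illustrative):

import chisel3._
import chisel3.util._
import gemmini._

// Hypothetical harness: push one 8-element row per cycle into an 8x8 byte
// AlwaysOutTransposer and unconditionally accept its output columns.
class TransposerHarness extends Module {
  val t = Module(new AlwaysOutTransposer(8, UInt(8.W)))
  t.io.inRow.valid  := true.B
  t.io.inRow.bits   := VecInit(Seq.tabulate(8)(i => i.U(8.W)))
  t.io.outCol.ready := true.B
  // After 8 accepted rows the internal direction register flips and the
  // columns of the previously loaded matrix stream out of outCol.
}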
Generate the Verilog code corresponding to the following Chisel files. File SwitchAllocator.scala: package constellation.router import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.{Field, Parameters} import freechips.rocketchip.util._ import constellation.channel._ class SwitchAllocReq(val outParams: Seq[ChannelParams], val egressParams: Seq[EgressChannelParams]) (implicit val p: Parameters) extends Bundle with HasRouterOutputParams { val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) }) val tail = Bool() } class SwitchArbiter(inN: Int, outN: Int, outParams: Seq[ChannelParams], egressParams: Seq[EgressChannelParams])(implicit val p: Parameters) extends Module { val io = IO(new Bundle { val in = Flipped(Vec(inN, Decoupled(new SwitchAllocReq(outParams, egressParams)))) val out = Vec(outN, Decoupled(new SwitchAllocReq(outParams, egressParams))) val chosen_oh = Vec(outN, Output(UInt(inN.W))) }) val lock = Seq.fill(outN) { RegInit(0.U(inN.W)) } val unassigned = Cat(io.in.map(_.valid).reverse) & ~(lock.reduce(_|_)) val mask = RegInit(0.U(inN.W)) val choices = Wire(Vec(outN, UInt(inN.W))) var sel = PriorityEncoderOH(Cat(unassigned, unassigned & ~mask)) for (i <- 0 until outN) { choices(i) := sel | (sel >> inN) sel = PriorityEncoderOH(unassigned & ~choices(i)) } io.in.foreach(_.ready := false.B) var chosens = 0.U(inN.W) val in_tails = Cat(io.in.map(_.bits.tail).reverse) for (i <- 0 until outN) { val in_valids = Cat((0 until inN).map { j => io.in(j).valid && !chosens(j) }.reverse) val chosen = Mux((in_valids & lock(i) & ~chosens).orR, lock(i), choices(i)) io.chosen_oh(i) := chosen io.out(i).valid := (in_valids & chosen).orR io.out(i).bits := Mux1H(chosen, io.in.map(_.bits)) for (j <- 0 until inN) { when (chosen(j) && io.out(i).ready) { io.in(j).ready := true.B } } chosens = chosens | chosen when (io.out(i).fire) { lock(i) := chosen & ~in_tails } } when (io.out(0).fire) { mask := (0 until inN).map { i => (io.chosen_oh(0) >> i) }.reduce(_|_) } .otherwise { mask := Mux(~mask === 0.U, 0.U, (mask << 1) | 1.U(1.W)) } } class SwitchAllocator( val routerParams: RouterParams, val inParams: Seq[ChannelParams], val outParams: Seq[ChannelParams], val ingressParams: Seq[IngressChannelParams], val egressParams: Seq[EgressChannelParams] )(implicit val p: Parameters) extends Module with HasRouterParams with HasRouterInputParams with HasRouterOutputParams { val io = IO(new Bundle { val req = MixedVec(allInParams.map(u => Vec(u.destSpeedup, Flipped(Decoupled(new SwitchAllocReq(outParams, egressParams)))))) val credit_alloc = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Output(new OutputCreditAlloc))}) val switch_sel = MixedVec(allOutParams.map { o => Vec(o.srcSpeedup, MixedVec(allInParams.map { i => Vec(i.destSpeedup, Output(Bool())) })) }) }) val nInputChannels = allInParams.map(_.nVirtualChannels).sum val arbs = allOutParams.map { oP => Module(new SwitchArbiter( allInParams.map(_.destSpeedup).reduce(_+_), oP.srcSpeedup, outParams, egressParams ))} arbs.foreach(_.io.out.foreach(_.ready := true.B)) var idx = 0 io.req.foreach(_.foreach { o => val fires = Wire(Vec(arbs.size, Bool())) arbs.zipWithIndex.foreach { case (a,i) => a.io.in(idx).valid := o.valid && o.bits.vc_sel(i).reduce(_||_) a.io.in(idx).bits := o.bits fires(i) := a.io.in(idx).fire } o.ready := fires.reduce(_||_) idx += 1 }) for (i <- 0 until nAllOutputs) { for (j <- 0 until allOutParams(i).srcSpeedup) { idx = 0 for (m <- 0 until nAllInputs) { for (n <- 0 until allInParams(m).destSpeedup) { 
io.switch_sel(i)(j)(m)(n) := arbs(i).io.in(idx).valid && arbs(i).io.chosen_oh(j)(idx) && arbs(i).io.out(j).valid idx += 1 } } } } io.credit_alloc.foreach(_.foreach(_.alloc := false.B)) io.credit_alloc.foreach(_.foreach(_.tail := false.B)) (arbs zip io.credit_alloc).zipWithIndex.map { case ((a,i),t) => for (j <- 0 until i.size) { for (k <- 0 until a.io.out.size) { when (a.io.out(k).valid && a.io.out(k).bits.vc_sel(t)(j)) { i(j).alloc := true.B i(j).tail := a.io.out(k).bits.tail } } } } }
module SwitchArbiter_135( // @[SwitchAllocator.scala:17:7] input clock, // @[SwitchAllocator.scala:17:7] input reset, // @[SwitchAllocator.scala:17:7] output io_in_1_ready, // @[SwitchAllocator.scala:18:14] input io_in_1_valid, // @[SwitchAllocator.scala:18:14] input io_in_1_bits_vc_sel_3_0, // @[SwitchAllocator.scala:18:14] input io_in_1_bits_vc_sel_2_1, // @[SwitchAllocator.scala:18:14] input io_in_1_bits_vc_sel_0_1, // @[SwitchAllocator.scala:18:14] input io_in_1_bits_tail, // @[SwitchAllocator.scala:18:14] input io_out_0_ready, // @[SwitchAllocator.scala:18:14] output io_out_0_valid, // @[SwitchAllocator.scala:18:14] output io_out_0_bits_vc_sel_3_0, // @[SwitchAllocator.scala:18:14] output io_out_0_bits_vc_sel_2_1, // @[SwitchAllocator.scala:18:14] output io_out_0_bits_vc_sel_0_1, // @[SwitchAllocator.scala:18:14] output io_out_0_bits_tail, // @[SwitchAllocator.scala:18:14] output [1:0] io_chosen_oh_0 // @[SwitchAllocator.scala:18:14] ); reg [1:0] lock_0; // @[SwitchAllocator.scala:24:38] wire [1:0] unassigned = {io_in_1_valid, 1'h0} & ~lock_0; // @[SwitchAllocator.scala:17:7, :24:38, :25:{23,52,54}] reg [1:0] mask; // @[SwitchAllocator.scala:27:21] wire [1:0] _sel_T_1 = unassigned & ~mask; // @[SwitchAllocator.scala:25:52, :27:21, :30:{58,60}] wire [3:0] sel = _sel_T_1[0] ? 4'h1 : _sel_T_1[1] ? 4'h2 : unassigned[0] ? 4'h4 : {unassigned[1], 3'h0}; // @[OneHot.scala:85:71] wire [1:0] chosen = io_in_1_valid & lock_0[1] ? lock_0 : sel[1:0] | sel[3:2]; // @[Mux.scala:50:70] wire _io_out_0_valid_T = io_in_1_valid & chosen[1]; // @[SwitchAllocator.scala:42:21, :44:35] wire _GEN = io_out_0_ready & _io_out_0_valid_T; // @[Decoupled.scala:51:35] always @(posedge clock) begin // @[SwitchAllocator.scala:17:7] if (reset) begin // @[SwitchAllocator.scala:17:7] lock_0 <= 2'h0; // @[SwitchAllocator.scala:24:38] mask <= 2'h0; // @[SwitchAllocator.scala:27:21] end else begin // @[SwitchAllocator.scala:17:7] if (_GEN) // @[Decoupled.scala:51:35] lock_0 <= chosen & {~io_in_1_bits_tail, 1'h1}; // @[SwitchAllocator.scala:17:7, :24:38, :39:21, :42:21, :53:{25,27}] mask <= _GEN ? {chosen[1], |chosen} : (&mask) ? 2'h0 : {mask[0], 1'h1}; // @[Decoupled.scala:51:35] end always @(posedge)
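The sel and mask terms above come straight from the rotating-priority selection in SwitchArbiter: the request vector masked by ~mask sits in the low half of the priority encode, so requesters the mask has not yet covered win; if none are eligible, the encoder falls through to the unmasked copy in the high half, and the two halves are folded back into a one-hot grant. A minimal sketch of that selection step in isolation (4 requesters; the module and signal names are illustrative):

import chisel3._
import chisel3.util._

// Hypothetical cut-down of the SwitchArbiter pick: grant the lowest-index
// requester not yet covered by the rotating mask, wrapping around to any
// requester when the masked copy is empty.
class RotatingPick extends Module {
  val io = IO(new Bundle {
    val req     = Input(UInt(4.W))   // one bit per requester
    val mask    = Input(UInt(4.W))   // requesters already served this round
    val grantOH = Output(UInt(4.W))
  })
  val sel = PriorityEncoderOH(Cat(io.req, io.req & ~io.mask)) // 8-bit one-hot
  io.grantOH := (sel | (sel >> 4))(3, 0)                      // fold halves together
}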
Generate the Verilog code corresponding to the following Chisel files. File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. 
*/ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class 
OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Nodes.scala: // See LICENSE.SiFive for license details. 
package freechips.rocketchip.tilelink import chisel3._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.nodes._ import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection} case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args)) object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle] { def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo) def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo) def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle) def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle) def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString) override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = { val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge))) monitor.io.in := bundle } override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters = pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }) override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters = pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }) } trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut] case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode case class TLAdapterNode( clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s }, managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLJunctionNode( clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters], managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])( implicit valName: ValName) extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode object TLNameNode { def apply(name: ValName) = TLIdentityNode()(name) def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLIdentityNode = apply(Some(name)) } case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)() object TLTempNode { def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp")) } case class TLNexusNode( clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters, managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)( implicit valName: ValName) extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode abstract class TLCustomNode(implicit valName: ValName) extends CustomNode(TLImp) with TLFormatNode // Asynchronous crossings trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters] object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, 
TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle] { def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle) def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString) override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLAsyncAdapterNode( clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s }, managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode object TLAsyncNameNode { def apply(name: ValName) = TLAsyncIdentityNode()(name) def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLAsyncIdentityNode = apply(Some(name)) } case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLAsyncImp)( dFn = { p => TLAsyncClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName) extends MixedAdapterNode(TLAsyncImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) }, uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut] // Rationally related crossings trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters] object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle] { def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle) def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */) override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case 
class TLRationalAdapterNode( clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s }, managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode object TLRationalNameNode { def apply(name: ValName) = TLRationalIdentityNode()(name) def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLRationalIdentityNode = apply(Some(name)) } case class TLRationalSourceNode()(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLRationalImp)( dFn = { p => TLRationalClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName) extends MixedAdapterNode(TLRationalImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut] // Credited version of TileLink channels trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters] object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle] { def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle) def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString) override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLCreditedAdapterNode( clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s }, managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode object TLCreditedNameNode { def apply(name: ValName) = TLCreditedIdentityNode()(name) def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLCreditedIdentityNode = apply(Some(name)) } case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLCreditedImp)( dFn = { p => TLCreditedClientPortParameters(delay, p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] 
// discard cycles from other clock domain case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLCreditedImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut] File Bundles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import freechips.rocketchip.util._ import scala.collection.immutable.ListMap import chisel3.util.Decoupled import chisel3.util.DecoupledIO import chisel3.reflect.DataMirror abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle // common combos in lazy policy: // Put + Acquire // Release + AccessAck object TLMessages { // A B C D E def PutFullData = 0.U // . . => AccessAck def PutPartialData = 1.U // . . => AccessAck def ArithmeticData = 2.U // . . => AccessAckData def LogicalData = 3.U // . . => AccessAckData def Get = 4.U // . . => AccessAckData def Hint = 5.U // . . => HintAck def AcquireBlock = 6.U // . => Grant[Data] def AcquirePerm = 7.U // . => Grant[Data] def Probe = 6.U // . => ProbeAck[Data] def AccessAck = 0.U // . . def AccessAckData = 1.U // . . def HintAck = 2.U // . . def ProbeAck = 4.U // . def ProbeAckData = 5.U // . def Release = 6.U // . => ReleaseAck def ReleaseData = 7.U // . => ReleaseAck def Grant = 4.U // . => GrantAck def GrantData = 5.U // . => GrantAck def ReleaseAck = 6.U // . def GrantAck = 0.U // . def isA(x: UInt) = x <= AcquirePerm def isB(x: UInt) = x <= Probe def isC(x: UInt) = x <= ReleaseData def isD(x: UInt) = x <= ReleaseAck def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant) def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck) def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("AcquireBlock",TLPermissions.PermMsgGrow), ("AcquirePerm",TLPermissions.PermMsgGrow)) def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("Probe",TLPermissions.PermMsgCap)) def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("ProbeAck",TLPermissions.PermMsgReport), ("ProbeAckData",TLPermissions.PermMsgReport), ("Release",TLPermissions.PermMsgReport), ("ReleaseData",TLPermissions.PermMsgReport)) def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("Grant",TLPermissions.PermMsgCap), ("GrantData",TLPermissions.PermMsgCap), ("ReleaseAck",TLPermissions.PermMsgReserved)) } /** * The three primary TileLink permissions are: * (T)runk: the agent is (or is on inwards path to) the global point of serialization. * (B)ranch: the agent is on an outwards path to * (N)one: * These permissions are permuted by transfer operations in various ways. 
* Operations can cap permissions, request for them to be grown or shrunk, * or for a report on their current status. */ object TLPermissions { val aWidth = 2 val bdWidth = 2 val cWidth = 3 // Cap types (Grant = new permissions, Probe = permisions <= target) def toT = 0.U(bdWidth.W) def toB = 1.U(bdWidth.W) def toN = 2.U(bdWidth.W) def isCap(x: UInt) = x <= toN // Grow types (Acquire = permissions >= target) def NtoB = 0.U(aWidth.W) def NtoT = 1.U(aWidth.W) def BtoT = 2.U(aWidth.W) def isGrow(x: UInt) = x <= BtoT // Shrink types (ProbeAck, Release) def TtoB = 0.U(cWidth.W) def TtoN = 1.U(cWidth.W) def BtoN = 2.U(cWidth.W) def isShrink(x: UInt) = x <= BtoN // Report types (ProbeAck, Release) def TtoT = 3.U(cWidth.W) def BtoB = 4.U(cWidth.W) def NtoN = 5.U(cWidth.W) def isReport(x: UInt) = x <= NtoN def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT") def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN") def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TotT", "Report BtoB", "Report NtoN") def PermMsgReserved:Seq[String] = Seq("Reserved") } object TLAtomics { val width = 3 // Arithmetic types def MIN = 0.U(width.W) def MAX = 1.U(width.W) def MINU = 2.U(width.W) def MAXU = 3.U(width.W) def ADD = 4.U(width.W) def isArithmetic(x: UInt) = x <= ADD // Logical types def XOR = 0.U(width.W) def OR = 1.U(width.W) def AND = 2.U(width.W) def SWAP = 3.U(width.W) def isLogical(x: UInt) = x <= SWAP def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD") def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP") } object TLHints { val width = 1 def PREFETCH_READ = 0.U(width.W) def PREFETCH_WRITE = 1.U(width.W) def isHints(x: UInt) = x <= PREFETCH_WRITE def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite") } sealed trait TLChannel extends TLBundleBase { val channelName: String } sealed trait TLDataChannel extends TLChannel sealed trait TLAddrChannel extends TLDataChannel final class TLBundleA(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleA_${params.shortName}" val channelName = "'A' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleB(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleB_${params.shortName}" val channelName = "'B' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val address = UInt(params.addressBits.W) // from // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleC(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleC_${params.shortName}" val channelName = "'C' channel" // fixed fields during multibeat: val opcode 
= UInt(3.W) val param = UInt(TLPermissions.cWidth.W) // shrink or report perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleD(params: TLBundleParameters) extends TLBundleBase(params) with TLDataChannel { override def typeName = s"TLBundleD_${params.shortName}" val channelName = "'D' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val sink = UInt(params.sinkBits.W) // from val denied = Bool() // implies corrupt iff *Data val user = BundleMap(params.responseFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleE(params: TLBundleParameters) extends TLBundleBase(params) with TLChannel { override def typeName = s"TLBundleE_${params.shortName}" val channelName = "'E' channel" val sink = UInt(params.sinkBits.W) // to } class TLBundle(val params: TLBundleParameters) extends Record { // Emulate a Bundle with elements abcde or ad depending on params.hasBCE private val optA = Some (Decoupled(new TLBundleA(params))) private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params)))) private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params))) private val optD = Some (Flipped(Decoupled(new TLBundleD(params)))) private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params))) def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params))))) def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params))))) def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params))))) def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params))))) def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params))))) val elements = if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a) else ListMap("d" -> d, "a" -> a) def tieoff(): Unit = { DataMirror.specifiedDirectionOf(a.ready) match { case SpecifiedDirection.Input => a.ready := false.B c.ready := false.B e.ready := false.B b.valid := false.B d.valid := false.B case SpecifiedDirection.Output => a.valid := false.B c.valid := false.B e.valid := false.B b.ready := false.B d.ready := false.B case _ => } } } object TLBundle { def apply(params: TLBundleParameters) = new TLBundle(params) } class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params) { val a = new AsyncBundle(new TLBundleA(params.base), params.async) val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async)) val c = new AsyncBundle(new TLBundleC(params.base), params.async) val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async)) val e = new AsyncBundle(new TLBundleE(params.base), params.async) } class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = RationalIO(new 
TLBundleA(params)) val b = Flipped(RationalIO(new TLBundleB(params))) val c = RationalIO(new TLBundleC(params)) val d = Flipped(RationalIO(new TLBundleD(params))) val e = RationalIO(new TLBundleE(params)) } class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = CreditedIO(new TLBundleA(params)) val b = Flipped(CreditedIO(new TLBundleB(params))) val c = CreditedIO(new TLBundleC(params)) val d = Flipped(CreditedIO(new TLBundleD(params))) val e = CreditedIO(new TLBundleE(params)) } File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. 
val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. 
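 *
 * A sketch of the usual idiom (the class and node names below are illustrative, not from this
 * file): a [[LazyModule]] provides its hardware by overriding `module` with a [[LazyModuleImp]],
 * inside which the negotiated diplomatic bundles become available:
 * {{{
 * class MyPassthrough(implicit p: Parameters) extends LazyModule {
 *   val node = TLIdentityNode()
 *   lazy val module = new LazyModuleImp(this) {
 *     // node.in / node.out may be used here; auto and dangles are generated by instantiate()
 *   }
 * }
 * }}}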
*/ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate 
these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U
    }

  def shift(x: Int) = IdRange(start+x, end+x)
  def size = end - start
  def isEmpty = end == start

  def range = start until end
}

object IdRange {
  def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
    val ranges = s.sorted
    (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
  }
}

// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int) {
  def this(x: Int) = this(x, x)

  require (min <= max, s"Min transfer $min > max transfer $max")
  require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
  require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
  require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
  require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")

  def none = min == 0
  def contains(x: Int) = isPow2(x) && min <= x && x <= max
  def containsLg(x: Int) = contains(1 << x)
  def containsLg(x: UInt) =
    if (none) false.B
    else if (min == max) { log2Ceil(min).U === x }
    else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }

  def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)

  def intersect(x: TransferSizes) =
    if (x.max < min || max < x.min) TransferSizes.none
    else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))

  // Not a union, because the result may contain sizes contained by neither term
  // NOT TO BE CONFUSED WITH COVERPOINTS
  def mincover(x: TransferSizes) = {
    if (none) {
      x
    } else if (x.none) {
      this
    } else {
      TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
    }
  }

  override def toString() = "TransferSizes[%d, %d]".format(min, max)
}

object TransferSizes {
  def apply(x: Int) = new TransferSizes(x)
  val none = new TransferSizes(0)

  def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
  def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)

  implicit def asBool(x: TransferSizes) = !x.none
}

// AddressSets specify the address space managed by the manager
// Base is the base address, and mask gives the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
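// An illustrative sketch (not from the original source) of how the base/mask encoding above
// behaves for the second example, assuming the AddressSet class defined just below:
//
//   val s = AddressSet(0x1000, 0xf0f)
//   s.contains(BigInt(0x1104)) // true : 0x1104 differs from the base only in masked bits
//   s.contains(BigInt(0x1010)) // false: bit 4 is outside the mask and differs from the base
//   s.alignment                // 0x10 : each fragment of the set is 16 bytes
//   s.contiguous               // false: the mask has a hole, so the set is not one contiguous range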
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File MixedNode.scala: package org.chipsalliance.diplomacy.nodes import chisel3.{Data, DontCare, Wire} import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.{Field, Parameters} import org.chipsalliance.diplomacy.ValName import org.chipsalliance.diplomacy.sourceLine /** One side metadata of a [[Dangle]]. * * Describes one side of an edge going into or out of a [[BaseNode]]. * * @param serial * the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to. * @param index * the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to. */ case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] { import scala.math.Ordered.orderingToOrdered def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that)) } /** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]] * connects. 
 *
 * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]];
 * [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
 *
 * @param source
 *   the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
 *   that [[BaseNode]].
 * @param sink
 *   the sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
 *   [[BaseNode]].
 * @param flipped
 *   flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
 *   `danglesIn`.
 * @param dataOpt
 *   actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module.
 */
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
  def data = dataOpt.get
}

/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
 * derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
 * implement the protocol.
 */
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])

/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)

/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
 *
 * For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
 * [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
 * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
 * [[LazyModule]].
 */
case object RenderFlipped extends Field[Boolean](false)

/** The sealed node class in the package; all nodes are derived from it.
 *
 * @param inner
 *   Sink interface implementation.
 * @param outer
 *   Source interface implementation.
 * @param valName
 *   val name of this node.
 * @tparam DI
 *   Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
 *   describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
 *   [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
 *   parameters.
 * @tparam UI
 *   Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
 *   the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
 * @tparam EI
 *   Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
 *   specified for a sink according to protocol.
 * @tparam BI
 *   Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
 *   It should extend [[chisel3.Data]], which represents the real hardware.
 * @tparam DO
 *   Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
 *   describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
 * @tparam UO
 *   Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
 *   the protocol parameters from a sink.
For an [[OutwardNode]], it is determined by the connected [[InwardNode]]. * Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters. * @tparam EO * Edge Parameters describing a connection on the outer side of the node. It is usually a brunch of transfers * specified for a source according to protocol. * @tparam BO * Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source * interface. It should extends from [[chisel3.Data]], which represents the real hardware. * * @note * Call Graph of [[MixedNode]] * - line `─`: source is process by a function and generate pass to others * - Arrow `→`: target of arrow is generated by source * * {{{ * (from the other node) * ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐ * ↓ │ * (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │ * [[InwardNode.accPI]] │ │ │ * │ │ (based on protocol) │ * │ │ [[MixedNode.inner.edgeI]] │ * │ │ ↓ │ * ↓ │ │ │ * (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │ * [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │ * │ │ ↑ │ │ │ * │ │ │ [[OutwardNode.doParams]] │ │ * │ │ │ (from the other node) │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * │ │ │ └────────┬──────────────┤ │ * │ │ │ │ │ │ * │ │ │ │ (based on protocol) │ * │ │ │ │ [[MixedNode.inner.edgeI]] │ * │ │ │ │ │ │ * │ │ (from the other node) │ ↓ │ * │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │ * │ ↑ ↑ │ │ ↓ │ * │ │ │ │ │ [[MixedNode.in]] │ * │ │ │ │ ↓ ↑ │ * │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │ * ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │ * │ │ │ [[MixedNode.bundleOut]]─┐ │ │ * │ │ │ ↑ ↓ │ │ * │ │ │ │ [[MixedNode.out]] │ │ * │ ↓ ↓ │ ↑ │ │ * │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │ * │ │ (from the other node) ↑ │ │ * │ │ │ │ │ │ * │ │ │ [[MixedNode.outer.edgeO]] │ │ * │ │ │ (based on protocol) │ │ * │ │ │ │ │ │ * │ │ │ ┌────────────────────────────────────────┤ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * (immobilize after elaboration)│ ↓ │ │ │ │ * [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │ * ↑ (inward port from [[OutwardNode]]) │ │ │ │ * │ ┌─────────────────────────────────────────┤ │ │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * [[OutwardNode.accPO]] │ ↓ │ │ │ * (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │ * │ ↑ │ │ * │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │ * └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘ * }}} */ abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data]( val inner: InwardNodeImp[DI, UI, EI, BI], val outer: OutwardNodeImp[DO, UO, EO, BO] )( implicit valName: ValName) extends BaseNode with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO] with InwardNode[DI, UI, BI] with OutwardNode[DO, UO, BO] { // Generate a [[NodeHandle]] with inward and outward node are both this node. val inward = this val outward = this /** Debug info of nodes binding. 
*/ def bindingInfo: String = s"""$iBindingInfo |$oBindingInfo |""".stripMargin /** Debug info of ports connecting. */ def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}] |${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}] |""".stripMargin /** Debug info of parameters propagations. */ def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}] |${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}] |${diParams.size} downstream inward parameters: [${diParams.mkString(",")}] |${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}] |""".stripMargin /** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and * [[MixedNode.iPortMapping]]. * * Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward * stars and outward stars. * * This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type * of node. * * @param iKnown * Number of known-size ([[BIND_ONCE]]) input bindings. * @param oKnown * Number of known-size ([[BIND_ONCE]]) output bindings. * @param iStar * Number of unknown size ([[BIND_STAR]]) input bindings. * @param oStar * Number of unknown size ([[BIND_STAR]]) output bindings. * @return * A Tuple of the resolved number of input and output connections. */ protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) /** Function to generate downward-flowing outward params from the downward-flowing input params and the current output * ports. * * @param n * The size of the output sequence to generate. * @param p * Sequence of downward-flowing input parameters of this node. * @return * A `n`-sized sequence of downward-flowing output edge parameters. */ protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]]. * * @param n * Size of the output sequence. * @param p * Upward-flowing output edge parameters. * @return * A n-sized sequence of upward-flowing input edge parameters. */ protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] /** @return * The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with * [[BIND_STAR]]. */ protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR) /** @return * The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of * output bindings bound with [[BIND_STAR]]. */ protected[diplomacy] lazy val sourceCard: Int = iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR) /** @return list of nodes involved in flex bindings with this node. */ protected[diplomacy] lazy val flexes: Seq[BaseNode] = oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2) /** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin * greedily taking up the remaining connections. * * @return * A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return * value is not relevant. 
*/ protected[diplomacy] lazy val flexOffset: Int = { /** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex * operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a * connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of * each node in the current set and decide whether they should be added to the set or not. * * @return * the mapping of [[BaseNode]] indexed by their serial numbers. */ def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = { if (visited.contains(v.serial) || !v.flexibleArityDirection) { visited } else { v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum)) } } /** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node. * * @example * {{{ * a :*=* b :*=* c * d :*=* b * e :*=* f * }}} * * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)` */ val flexSet = DFS(this, Map()).values /** The total number of :*= operators where we're on the left. */ val allSink = flexSet.map(_.sinkCard).sum /** The total number of :=* operators used when we're on the right. */ val allSource = flexSet.map(_.sourceCard).sum require( allSink == 0 || allSource == 0, s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction." ) allSink - allSource } /** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */ protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = { if (flexibleArityDirection) flexOffset else if (n.flexibleArityDirection) n.flexOffset else 0 } /** For a node which is connected between two nodes, select the one that will influence the direction of the flex * resolution. */ protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = { val dir = edgeArityDirection(n) if (dir < 0) l else if (dir > 0) r else 1 } /** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */ private var starCycleGuard = false /** Resolve all the star operators into concrete indicies. As connections are being made, some may be "star" * connections which need to be resolved. In some way to determine how many actual edges they correspond to. We also * need to build up the ranges of edges which correspond to each binding operator, so that We can apply the correct * edge parameters and later build up correct bundle connections. * * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding * operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort * (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= * bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N` */ protected[diplomacy] lazy val ( oPortMapping: Seq[(Int, Int)], iPortMapping: Seq[(Int, Int)], oStar: Int, iStar: Int ) = { try { if (starCycleGuard) throw StarCycleException() starCycleGuard = true // For a given node N... 
// Number of foo :=* N // + Number of bar :=* foo :*=* N val oStars = oBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0) } // Number of N :*= foo // + Number of N :*=* foo :*= bar val iStars = iBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0) } // 1 for foo := N // + bar.iStar for bar :*= foo :*=* N // + foo.iStar for foo :*= N // + 0 for foo :=* N val oKnown = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, 0, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => 0 } }.sum // 1 for N := foo // + bar.oStar for N :*=* foo :=* bar // + foo.oStar for N :=* foo // + 0 for N :*= foo val iKnown = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, 0) case BIND_QUERY => n.oStar case BIND_STAR => 0 } }.sum // Resolve star depends on the node subclass to implement the algorithm for this. val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars) // Cumulative list of resolved outward binding range starting points val oSum = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => oStar } }.scanLeft(0)(_ + _) // Cumulative list of resolved inward binding range starting points val iSum = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar) case BIND_QUERY => n.oStar case BIND_STAR => iStar } }.scanLeft(0)(_ + _) // Create ranges for each binding based on the running sums and return // those along with resolved values for the star operations. (oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar) } catch { case c: StarCycleException => throw c.copy(loop = context +: c.loop) } } /** Sequence of inward ports. * * This should be called after all star bindings are resolved. * * Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. * `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this * connection was made in the source code. */ protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oBindings.flatMap { case (i, n, _, p, s) => // for each binding operator in this node, look at what it connects to val (start, end) = n.iPortMapping(i) (start until end).map { j => (j, n, p, s) } } /** Sequence of outward ports. * * This should be called after all star bindings are resolved. * * `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of * outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection * was made in the source code. */ protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iBindings.flatMap { case (i, n, _, p, s) => // query this port index range of this node in the other side of node. 
val (start, end) = n.oPortMapping(i) (start until end).map { j => (j, n, p, s) } } // Ephemeral nodes ( which have non-None iForward/oForward) have in_degree = out_degree // Thus, there must exist an Eulerian path and the below algorithms terminate @scala.annotation.tailrec private def oTrace( tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) ): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.iForward(i) match { case None => (i, n, p, s) case Some((j, m)) => oTrace((j, m, p, s)) } } @scala.annotation.tailrec private def iTrace( tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) ): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.oForward(i) match { case None => (i, n, p, s) case Some((j, m)) => iTrace((j, m, p, s)) } } /** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - Numeric index of this binding in the [[InwardNode]] on the other end. * - [[InwardNode]] on the other end of this binding. * - A view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace) /** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - numeric index of this binding in [[OutwardNode]] on the other end. * - [[OutwardNode]] on the other end of this binding. * - a view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace) private var oParamsCycleGuard = false protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) } protected[diplomacy] lazy val doParams: Seq[DO] = { try { if (oParamsCycleGuard) throw DownwardCycleException() oParamsCycleGuard = true val o = mapParamsD(oPorts.size, diParams) require( o.size == oPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of outward ports should equal the number of produced outward parameters. |$context |$connectedPortsInfo |Downstreamed inward parameters: [${diParams.mkString(",")}] |Produced outward parameters: [${o.mkString(",")}] |""".stripMargin ) o.map(outer.mixO(_, this)) } catch { case c: DownwardCycleException => throw c.copy(loop = context +: c.loop) } } private var iParamsCycleGuard = false protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) } protected[diplomacy] lazy val uiParams: Seq[UI] = { try { if (iParamsCycleGuard) throw UpwardCycleException() iParamsCycleGuard = true val i = mapParamsU(iPorts.size, uoParams) require( i.size == iPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of inward ports should equal the number of produced inward parameters. |$context |$connectedPortsInfo |Upstreamed outward parameters: [${uoParams.mkString(",")}] |Produced inward parameters: [${i.mkString(",")}] |""".stripMargin ) i.map(inner.mixI(_, this)) } catch { case c: UpwardCycleException => throw c.copy(loop = context +: c.loop) } } /** Outward edge parameters. 
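 *
 * A sketch of typical use (the names below are illustrative, not from this file): within a
 * [[LazyModuleImp]], the negotiated bundle and edge for port `i` are usually read back through
 * the public `out` accessor defined further down, rather than through this field directly:
 * {{{
 * lazy val module = new LazyModuleImp(this) {
 *   val (bundle, edge) = node.out(0) // (BO, EO) for the first outward port
 * }
 * }}}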
*/ protected[diplomacy] lazy val edgesOut: Seq[EO] = (oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) } /** Inward edge parameters. */ protected[diplomacy] lazy val edgesIn: Seq[EI] = (iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) } /** A tuple of the input edge parameters and output edge parameters for the edges bound to this node. * * If you need to access to the edges of a foreign Node, use this method (in/out create bundles). */ lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut) /** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */ protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e => val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } /** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */ protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e => val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(serial, i), sink = HalfEdge(n.serial, j), flipped = false, name = wirePrefix + "out", dataOpt = None ) } private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(n.serial, j), sink = HalfEdge(serial, i), flipped = true, name = wirePrefix + "in", dataOpt = None ) } /** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */ protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleOut(i))) } /** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */ protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleIn(i))) } private[diplomacy] var instantiated = false /** Gather Bundle and edge parameters of outward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def out: Seq[(BO, EO)] = { require( instantiated, s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleOut.zip(edgesOut) } /** Gather Bundle and edge parameters of inward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def in: Seq[(BI, EI)] = { require( instantiated, s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleIn.zip(edgesIn) } /** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires, * instantiate monitors on all input ports if appropriate, and return all the dangles of this node. 
*/ protected[diplomacy] def instantiate(): Seq[Dangle] = { instantiated = true if (!circuitIdentity) { (iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) } } danglesOut ++ danglesIn } protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn /** Connects the outward part of a node with the inward part of this node. */ protected[diplomacy] def bind( h: OutwardNode[DI, UI, BI], binding: NodeBinding )( implicit p: Parameters, sourceInfo: SourceInfo ): Unit = { val x = this // x := y val y = h sourceLine(sourceInfo, " at ", "") val i = x.iPushed val o = y.oPushed y.oPush( i, x, binding match { case BIND_ONCE => BIND_ONCE case BIND_FLEX => BIND_FLEX case BIND_STAR => BIND_QUERY case BIND_QUERY => BIND_STAR } ) x.iPush(o, y, binding) } /* Metadata for printing the node graph. */ def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) => val re = inner.render(e) (n, re.copy(flipped = re.flipped != p(RenderFlipped))) } /** Metadata for printing the node graph */ def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) } } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? 
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x 
match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! 
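  // A sketch of how the pair returned by inFlight below might be used (illustrative only; the
  // bundle and signal names here are assumptions, not part of this file):
  //
  //   val (inflight, _) = edge.inFlight(tlBundle)
  //   when (quiesceRequested) { assert(inflight === 0.U, "TileLink traffic has not drained") }
  //
  // Because it counts outstanding requests across all five channels, prefer the cheaper
  // first/last/done helpers above when only beat boundaries are needed.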
def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare 
c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, 
mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := 
DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: 
Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, 
data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } } File Arbiter.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ object TLArbiter { // (valids, select) => readys type Policy = (Integer, UInt, Bool) => UInt val lowestIndexFirst: Policy = (width, valids, select) => ~(leftOR(valids) << 1)(width-1, 0) val highestIndexFirst: Policy = (width, valids, select) => ~((rightOR(valids) >> 1).pad(width)) val roundRobin: Policy = (width, valids, select) => if (width == 1) 1.U(1.W) else { val valid = valids(width-1, 0) assert (valid === valids) val mask = RegInit(((BigInt(1) << width)-1).U(width-1,0)) val filter = Cat(valid & ~mask, valid) val unready = (rightOR(filter, width*2, width) >> 1) | (mask << width) val readys = ~((unready >> width) & unready(width-1, 0)) when (select && valid.orR) { mask := leftOR(readys & valid, width) } readys(width-1, 0) } def lowestFromSeq[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: Seq[DecoupledIO[T]]): Unit = { apply(lowestIndexFirst)(sink, sources.map(s => (edge.numBeats1(s.bits), s)):_*) } def lowest[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: DecoupledIO[T]*): Unit = { apply(lowestIndexFirst)(sink, sources.toList.map(s => (edge.numBeats1(s.bits), s)):_*) } def highest[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: DecoupledIO[T]*): Unit = { apply(highestIndexFirst)(sink, sources.toList.map(s => (edge.numBeats1(s.bits), s)):_*) } def robin[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: DecoupledIO[T]*): Unit = { apply(roundRobin)(sink, sources.toList.map(s => (edge.numBeats1(s.bits), s)):_*) } def apply[T <: Data](policy: Policy)(sink: DecoupledIO[T], sources: (UInt, DecoupledIO[T])*): Unit = { if (sources.isEmpty) { sink.bits := DontCare } else if (sources.size == 1) { sink :<>= sources.head._2 } else { val pairs = sources.toList val beatsIn = pairs.map(_._1) val sourcesIn = pairs.map(_._2) // The number of beats which remain to be sent val beatsLeft = RegInit(0.U) val idle = beatsLeft === 0.U val latch = idle && sink.ready // winner (if any) claims sink // Who wants access to the sink? val valids = sourcesIn.map(_.valid) // Arbitrate amongst the requests val readys = VecInit(policy(valids.size, Cat(valids.reverse), latch).asBools) // Which request wins arbitration? 
val winner = VecInit((readys zip valids) map { case (r,v) => r&&v }) // Confirm the policy works properly require (readys.size == valids.size) // Never two winners val prefixOR = winner.scanLeft(false.B)(_||_).init assert((prefixOR zip winner) map { case (p,w) => !p || !w } reduce {_ && _}) // If there was any request, there is a winner assert (!valids.reduce(_||_) || winner.reduce(_||_)) // Track remaining beats val maskedBeats = (winner zip beatsIn) map { case (w,b) => Mux(w, b, 0.U) } val initBeats = maskedBeats.reduce(_ | _) // no winner => 0 beats beatsLeft := Mux(latch, initBeats, beatsLeft - sink.fire) // The one-hot source granted access in the previous cycle val state = RegInit(VecInit(Seq.fill(sources.size)(false.B))) val muxState = Mux(idle, winner, state) state := muxState val allowed = Mux(idle, readys, state) (sourcesIn zip allowed) foreach { case (s, r) => s.ready := sink.ready && r } sink.valid := Mux(idle, valids.reduce(_||_), Mux1H(state, valids)) sink.bits :<= Mux1H(muxState, sourcesIn.map(_.bits)) } } } // Synthesizable unit tests import freechips.rocketchip.unittest._ abstract class DecoupledArbiterTest( policy: TLArbiter.Policy, txns: Int, timeout: Int, val numSources: Int, beatsLeftFromIdx: Int => UInt) (implicit p: Parameters) extends UnitTest(timeout) { val sources = Wire(Vec(numSources, DecoupledIO(UInt(log2Ceil(numSources).W)))) dontTouch(sources.suggestName("sources")) val sink = Wire(DecoupledIO(UInt(log2Ceil(numSources).W))) dontTouch(sink.suggestName("sink")) val count = RegInit(0.U(log2Ceil(txns).W)) val lfsr = LFSR(16, true.B) sources.zipWithIndex.map { case (z, i) => z.bits := i.U } TLArbiter(policy)(sink, sources.zipWithIndex.map { case (z, i) => (beatsLeftFromIdx(i), z) }:_*) count := count + 1.U io.finished := count >= txns.U } /** This tests that when a specific pattern of source valids are driven, * a new index from amongst that pattern is always selected, * unless one of those sources takes multiple beats, * in which case the same index should be selected until the arbiter goes idle. */ class TLDecoupledArbiterRobinTest(txns: Int = 128, timeout: Int = 500000, print: Boolean = false) (implicit p: Parameters) extends DecoupledArbiterTest(TLArbiter.roundRobin, txns, timeout, 6, i => i.U) { val lastWinner = RegInit((numSources+1).U) val beatsLeft = RegInit(0.U(log2Ceil(numSources).W)) val first = lastWinner > numSources.U val valid = lfsr(0) val ready = lfsr(15) sink.ready := ready sources.zipWithIndex.map { // pattern: every even-indexed valid is driven the same random way case (s, i) => s.valid := (if (i % 2 == 1) false.B else valid) } when (sink.fire) { if (print) { printf("TestRobin: %d\n", sink.bits) } when (beatsLeft === 0.U) { assert(lastWinner =/= sink.bits, "Round robin did not pick a new idx despite one being valid.") lastWinner := sink.bits beatsLeft := sink.bits } .otherwise { assert(lastWinner === sink.bits, "Round robin did not pick the same index over multiple beats") beatsLeft := beatsLeft - 1.U } } if (print) { when (!sink.fire) { printf("TestRobin: idle (%d %d)\n", valid, ready) } } } /** This tests that the lowest index is always selected across random single cycle transactions. 
*/ class TLDecoupledArbiterLowestTest(txns: Int = 128, timeout: Int = 500000)(implicit p: Parameters) extends DecoupledArbiterTest(TLArbiter.lowestIndexFirst, txns, timeout, 15, _ => 0.U) { def assertLowest(id: Int): Unit = { when (sources(id).valid) { assert((numSources-1 until id by -1).map(!sources(_).fire).foldLeft(true.B)(_&&_), s"$id was valid but a higher valid source was granted ready.") } } sources.zipWithIndex.map { case (s, i) => s.valid := lfsr(i) } sink.ready := lfsr(15) when (sink.fire) { (0 until numSources).foreach(assertLowest(_)) } } /** This tests that the highest index is always selected across random single cycle transactions. */ class TLDecoupledArbiterHighestTest(txns: Int = 128, timeout: Int = 500000)(implicit p: Parameters) extends DecoupledArbiterTest(TLArbiter.highestIndexFirst, txns, timeout, 15, _ => 0.U) { def assertHighest(id: Int): Unit = { when (sources(id).valid) { assert((0 until id).map(!sources(_).fire).foldLeft(true.B)(_&&_), s"$id was valid but a lower valid source was granted ready.") } } sources.zipWithIndex.map { case (s, i) => s.valid := lfsr(i) } sink.ready := lfsr(15) when (sink.fire) { (0 until numSources).foreach(assertHighest(_)) } } File Xbar.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.diplomacy.{AddressDecoder, AddressSet, RegionType, IdRange, TriStateValue} import freechips.rocketchip.util.BundleField // Trades off slave port proximity against routing resource cost object ForceFanout { def apply[T]( a: TriStateValue = TriStateValue.unset, b: TriStateValue = TriStateValue.unset, c: TriStateValue = TriStateValue.unset, d: TriStateValue = TriStateValue.unset, e: TriStateValue = TriStateValue.unset)(body: Parameters => T)(implicit p: Parameters) = { body(p.alterPartial { case ForceFanoutKey => p(ForceFanoutKey) match { case ForceFanoutParams(pa, pb, pc, pd, pe) => ForceFanoutParams(a.update(pa), b.update(pb), c.update(pc), d.update(pd), e.update(pe)) } }) } } private case class ForceFanoutParams(a: Boolean, b: Boolean, c: Boolean, d: Boolean, e: Boolean) private case object ForceFanoutKey extends Field(ForceFanoutParams(false, false, false, false, false)) class TLXbar(policy: TLArbiter.Policy = TLArbiter.roundRobin, nameSuffix: Option[String] = None)(implicit p: Parameters) extends LazyModule { val node = new TLNexusNode( clientFn = { seq => seq(0).v1copy( echoFields = BundleField.union(seq.flatMap(_.echoFields)), requestFields = BundleField.union(seq.flatMap(_.requestFields)), responseKeys = seq.flatMap(_.responseKeys).distinct, minLatency = seq.map(_.minLatency).min, clients = (TLXbar.mapInputIds(seq) zip seq) flatMap { case (range, port) => port.clients map { client => client.v1copy( sourceId = client.sourceId.shift(range.start) )} } ) }, managerFn = { seq => val fifoIdFactory = TLXbar.relabeler() seq(0).v1copy( responseFields = BundleField.union(seq.flatMap(_.responseFields)), requestKeys = seq.flatMap(_.requestKeys).distinct, minLatency = seq.map(_.minLatency).min, endSinkId = TLXbar.mapOutputIds(seq).map(_.end).max, managers = seq.flatMap { port => require (port.beatBytes == seq(0).beatBytes, s"Xbar ($name with parent $parent) data widths don't match: ${port.managers.map(_.name)} has ${port.beatBytes}B vs ${seq(0).managers.map(_.name)} has ${seq(0).beatBytes}B") val fifoIdMapper = fifoIdFactory() port.managers map { manager => manager.v1copy( 
fifoId = manager.fifoId.map(fifoIdMapper(_)) )} } ) } ){ override def circuitIdentity = outputs.size == 1 && inputs.size == 1 } lazy val module = new Impl class Impl extends LazyModuleImp(this) { if ((node.in.size * node.out.size) > (8*32)) { println (s"!!! WARNING !!!") println (s" Your TLXbar ($name with parent $parent) is very large, with ${node.in.size} Masters and ${node.out.size} Slaves.") println (s"!!! WARNING !!!") } val wide_bundle = TLBundleParameters.union((node.in ++ node.out).map(_._2.bundle)) override def desiredName = (Seq("TLXbar") ++ nameSuffix ++ Seq(s"i${node.in.size}_o${node.out.size}_${wide_bundle.shortName}")).mkString("_") TLXbar.circuit(policy, node.in, node.out) } } object TLXbar { def mapInputIds(ports: Seq[TLMasterPortParameters]) = assignRanges(ports.map(_.endSourceId)) def mapOutputIds(ports: Seq[TLSlavePortParameters]) = assignRanges(ports.map(_.endSinkId)) def assignRanges(sizes: Seq[Int]) = { val pow2Sizes = sizes.map { z => if (z == 0) 0 else 1 << log2Ceil(z) } val tuples = pow2Sizes.zipWithIndex.sortBy(_._1) // record old index, then sort by increasing size val starts = tuples.scanRight(0)(_._1 + _).tail // suffix-sum of the sizes = the start positions val ranges = (tuples zip starts) map { case ((sz, i), st) => (if (sz == 0) IdRange(0, 0) else IdRange(st, st + sz), i) } ranges.sortBy(_._2).map(_._1) // Restore orignal order } def relabeler() = { var idFactory = 0 () => { val fifoMap = scala.collection.mutable.HashMap.empty[Int, Int] (x: Int) => { if (fifoMap.contains(x)) fifoMap(x) else { val out = idFactory idFactory = idFactory + 1 fifoMap += (x -> out) out } } } } def circuit(policy: TLArbiter.Policy, seqIn: Seq[(TLBundle, TLEdge)], seqOut: Seq[(TLBundle, TLEdge)]) { val (io_in, edgesIn) = seqIn.unzip val (io_out, edgesOut) = seqOut.unzip // Not every master need connect to every slave on every channel; determine which connections are necessary val reachableIO = edgesIn.map { cp => edgesOut.map { mp => cp.client.clients.exists { c => mp.manager.managers.exists { m => c.visibility.exists { ca => m.address.exists { ma => ca.overlaps(ma)}}}} }.toVector}.toVector val probeIO = (edgesIn zip reachableIO).map { case (cp, reachableO) => (edgesOut zip reachableO).map { case (mp, reachable) => reachable && cp.client.anySupportProbe && mp.manager.managers.exists(_.regionType >= RegionType.TRACKED) }.toVector}.toVector val releaseIO = (edgesIn zip reachableIO).map { case (cp, reachableO) => (edgesOut zip reachableO).map { case (mp, reachable) => reachable && cp.client.anySupportProbe && mp.manager.anySupportAcquireB }.toVector}.toVector val connectAIO = reachableIO val connectBIO = probeIO val connectCIO = releaseIO val connectDIO = reachableIO val connectEIO = releaseIO def transpose[T](x: Seq[Seq[T]]) = if (x.isEmpty) Nil else Vector.tabulate(x(0).size) { i => Vector.tabulate(x.size) { j => x(j)(i) } } val connectAOI = transpose(connectAIO) val connectBOI = transpose(connectBIO) val connectCOI = transpose(connectCIO) val connectDOI = transpose(connectDIO) val connectEOI = transpose(connectEIO) // Grab the port ID mapping val inputIdRanges = TLXbar.mapInputIds(edgesIn.map(_.client)) val outputIdRanges = TLXbar.mapOutputIds(edgesOut.map(_.manager)) // We need an intermediate size of bundle with the widest possible identifiers val wide_bundle = TLBundleParameters.union(io_in.map(_.params) ++ io_out.map(_.params)) // Handle size = 1 gracefully (Chisel3 empty range is broken) def trim(id: UInt, size: Int): UInt = if (size <= 1) 0.U else id(log2Ceil(size)-1, 0) // 
Transform input bundle sources (sinks use global namespace on both sides) val in = Wire(Vec(io_in.size, TLBundle(wide_bundle))) for (i <- 0 until in.size) { val r = inputIdRanges(i) if (connectAIO(i).exists(x=>x)) { in(i).a.bits.user := DontCare in(i).a.squeezeAll.waiveAll :<>= io_in(i).a.squeezeAll.waiveAll in(i).a.bits.source := io_in(i).a.bits.source | r.start.U } else { in(i).a := DontCare io_in(i).a := DontCare in(i).a.valid := false.B io_in(i).a.ready := true.B } if (connectBIO(i).exists(x=>x)) { io_in(i).b.squeezeAll :<>= in(i).b.squeezeAll io_in(i).b.bits.source := trim(in(i).b.bits.source, r.size) } else { in(i).b := DontCare io_in(i).b := DontCare in(i).b.ready := true.B io_in(i).b.valid := false.B } if (connectCIO(i).exists(x=>x)) { in(i).c.bits.user := DontCare in(i).c.squeezeAll.waiveAll :<>= io_in(i).c.squeezeAll.waiveAll in(i).c.bits.source := io_in(i).c.bits.source | r.start.U } else { in(i).c := DontCare io_in(i).c := DontCare in(i).c.valid := false.B io_in(i).c.ready := true.B } if (connectDIO(i).exists(x=>x)) { io_in(i).d.squeezeAll.waiveAll :<>= in(i).d.squeezeAll.waiveAll io_in(i).d.bits.source := trim(in(i).d.bits.source, r.size) } else { in(i).d := DontCare io_in(i).d := DontCare in(i).d.ready := true.B io_in(i).d.valid := false.B } if (connectEIO(i).exists(x=>x)) { in(i).e.squeezeAll :<>= io_in(i).e.squeezeAll } else { in(i).e := DontCare io_in(i).e := DontCare in(i).e.valid := false.B io_in(i).e.ready := true.B } } // Transform output bundle sinks (sources use global namespace on both sides) val out = Wire(Vec(io_out.size, TLBundle(wide_bundle))) for (o <- 0 until out.size) { val r = outputIdRanges(o) if (connectAOI(o).exists(x=>x)) { out(o).a.bits.user := DontCare io_out(o).a.squeezeAll.waiveAll :<>= out(o).a.squeezeAll.waiveAll } else { out(o).a := DontCare io_out(o).a := DontCare out(o).a.ready := true.B io_out(o).a.valid := false.B } if (connectBOI(o).exists(x=>x)) { out(o).b.squeezeAll :<>= io_out(o).b.squeezeAll } else { out(o).b := DontCare io_out(o).b := DontCare out(o).b.valid := false.B io_out(o).b.ready := true.B } if (connectCOI(o).exists(x=>x)) { out(o).c.bits.user := DontCare io_out(o).c.squeezeAll.waiveAll :<>= out(o).c.squeezeAll.waiveAll } else { out(o).c := DontCare io_out(o).c := DontCare out(o).c.ready := true.B io_out(o).c.valid := false.B } if (connectDOI(o).exists(x=>x)) { out(o).d.squeezeAll :<>= io_out(o).d.squeezeAll out(o).d.bits.sink := io_out(o).d.bits.sink | r.start.U } else { out(o).d := DontCare io_out(o).d := DontCare out(o).d.valid := false.B io_out(o).d.ready := true.B } if (connectEOI(o).exists(x=>x)) { io_out(o).e.squeezeAll :<>= out(o).e.squeezeAll io_out(o).e.bits.sink := trim(out(o).e.bits.sink, r.size) } else { out(o).e := DontCare io_out(o).e := DontCare out(o).e.ready := true.B io_out(o).e.valid := false.B } } // Filter a list to only those elements selected def filter[T](data: Seq[T], mask: Seq[Boolean]) = (data zip mask).filter(_._2).map(_._1) // Based on input=>output connectivity, create per-input minimal address decode circuits val requiredAC = (connectAIO ++ connectCIO).distinct val outputPortFns: Map[Vector[Boolean], Seq[UInt => Bool]] = requiredAC.map { connectO => val port_addrs = edgesOut.map(_.manager.managers.flatMap(_.address)) val routingMask = AddressDecoder(filter(port_addrs, connectO)) val route_addrs = port_addrs.map(seq => AddressSet.unify(seq.map(_.widen(~routingMask)).distinct)) // Print the address mapping if (false) { println("Xbar mapping:") route_addrs.foreach { p => print(" ") p.foreach { a => 
print(s" ${a}") } println("") } println("--") } (connectO, route_addrs.map(seq => (addr: UInt) => seq.map(_.contains(addr)).reduce(_ || _))) }.toMap // Print the ID mapping if (false) { println(s"XBar mapping:") (edgesIn zip inputIdRanges).zipWithIndex.foreach { case ((edge, id), i) => println(s"\t$i assigned ${id} for ${edge.client.clients.map(_.name).mkString(", ")}") } println("") } val addressA = (in zip edgesIn) map { case (i, e) => e.address(i.a.bits) } val addressC = (in zip edgesIn) map { case (i, e) => e.address(i.c.bits) } def unique(x: Vector[Boolean]): Bool = (x.filter(x=>x).size <= 1).B val requestAIO = (connectAIO zip addressA) map { case (c, i) => outputPortFns(c).map { o => unique(c) || o(i) } } val requestCIO = (connectCIO zip addressC) map { case (c, i) => outputPortFns(c).map { o => unique(c) || o(i) } } val requestBOI = out.map { o => inputIdRanges.map { i => i.contains(o.b.bits.source) } } val requestDOI = out.map { o => inputIdRanges.map { i => i.contains(o.d.bits.source) } } val requestEIO = in.map { i => outputIdRanges.map { o => o.contains(i.e.bits.sink) } } val beatsAI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.a.bits) } val beatsBO = (out zip edgesOut) map { case (o, e) => e.numBeats1(o.b.bits) } val beatsCI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.c.bits) } val beatsDO = (out zip edgesOut) map { case (o, e) => e.numBeats1(o.d.bits) } val beatsEI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.e.bits) } // Fanout the input sources to the output sinks val portsAOI = transpose((in zip requestAIO) map { case (i, r) => TLXbar.fanout(i.a, r, edgesOut.map(_.params(ForceFanoutKey).a)) }) val portsBIO = transpose((out zip requestBOI) map { case (o, r) => TLXbar.fanout(o.b, r, edgesIn .map(_.params(ForceFanoutKey).b)) }) val portsCOI = transpose((in zip requestCIO) map { case (i, r) => TLXbar.fanout(i.c, r, edgesOut.map(_.params(ForceFanoutKey).c)) }) val portsDIO = transpose((out zip requestDOI) map { case (o, r) => TLXbar.fanout(o.d, r, edgesIn .map(_.params(ForceFanoutKey).d)) }) val portsEOI = transpose((in zip requestEIO) map { case (i, r) => TLXbar.fanout(i.e, r, edgesOut.map(_.params(ForceFanoutKey).e)) }) // Arbitrate amongst the sources for (o <- 0 until out.size) { TLArbiter(policy)(out(o).a, filter(beatsAI zip portsAOI(o), connectAOI(o)):_*) TLArbiter(policy)(out(o).c, filter(beatsCI zip portsCOI(o), connectCOI(o)):_*) TLArbiter(policy)(out(o).e, filter(beatsEI zip portsEOI(o), connectEOI(o)):_*) filter(portsAOI(o), connectAOI(o).map(!_)) foreach { r => r.ready := false.B } filter(portsCOI(o), connectCOI(o).map(!_)) foreach { r => r.ready := false.B } filter(portsEOI(o), connectEOI(o).map(!_)) foreach { r => r.ready := false.B } } for (i <- 0 until in.size) { TLArbiter(policy)(in(i).b, filter(beatsBO zip portsBIO(i), connectBIO(i)):_*) TLArbiter(policy)(in(i).d, filter(beatsDO zip portsDIO(i), connectDIO(i)):_*) filter(portsBIO(i), connectBIO(i).map(!_)) foreach { r => r.ready := false.B } filter(portsDIO(i), connectDIO(i).map(!_)) foreach { r => r.ready := false.B } } } def apply(policy: TLArbiter.Policy = TLArbiter.roundRobin, nameSuffix: Option[String] = None)(implicit p: Parameters): TLNode = { val xbar = LazyModule(new TLXbar(policy, nameSuffix)) xbar.node } // Replicate an input port to each output port def fanout[T <: TLChannel](input: DecoupledIO[T], select: Seq[Bool], force: Seq[Boolean] = Nil): Seq[DecoupledIO[T]] = { val filtered = Wire(Vec(select.size, chiselTypeOf(input))) for (i <- 0 until select.size) { 
filtered(i).bits := (if (force.lift(i).getOrElse(false)) IdentityModule(input.bits) else input.bits) filtered(i).valid := input.valid && (select(i) || (select.size == 1).B) } input.ready := Mux1H(select, filtered.map(_.ready)) filtered } } // Synthesizable unit tests import freechips.rocketchip.unittest._ class TLRAMXbar(nManagers: Int, txns: Int)(implicit p: Parameters) extends LazyModule { val fuzz = LazyModule(new TLFuzzer(txns)) val model = LazyModule(new TLRAMModel("Xbar")) val xbar = LazyModule(new TLXbar) xbar.node := TLDelayer(0.1) := model.node := fuzz.node (0 until nManagers) foreach { n => val ram = LazyModule(new TLRAM(AddressSet(0x0+0x400*n, 0x3ff))) ram.node := TLFragmenter(4, 256) := TLDelayer(0.1) := xbar.node } lazy val module = new Impl class Impl extends LazyModuleImp(this) with UnitTestModule { io.finished := fuzz.module.io.finished } } class TLRAMXbarTest(nManagers: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) { val dut = Module(LazyModule(new TLRAMXbar(nManagers,txns)).module) dut.io.start := io.start io.finished := dut.io.finished } class TLMulticlientXbar(nManagers: Int, nClients: Int, txns: Int)(implicit p: Parameters) extends LazyModule { val xbar = LazyModule(new TLXbar) val fuzzers = (0 until nClients) map { n => val fuzz = LazyModule(new TLFuzzer(txns)) xbar.node := TLDelayer(0.1) := fuzz.node fuzz } (0 until nManagers) foreach { n => val ram = LazyModule(new TLRAM(AddressSet(0x0+0x400*n, 0x3ff))) ram.node := TLFragmenter(4, 256) := TLDelayer(0.1) := xbar.node } lazy val module = new Impl class Impl extends LazyModuleImp(this) with UnitTestModule { io.finished := fuzzers.last.module.io.finished } } class TLMulticlientXbarTest(nManagers: Int, nClients: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) { val dut = Module(LazyModule(new TLMulticlientXbar(nManagers, nClients, txns)).module) dut.io.start := io.start io.finished := dut.io.finished }
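For reference, the TLEdgeOut helpers defined earlier (Get, Put, AccessAck, and friends) are meant to be called from a client's LazyModuleImp: each builds the channel bits, and the request builders additionally return a legality bit derived from the visible managers. The sketch below is a minimal, hypothetical single-source client written against that API; the node parameters, target address, and transfer size are illustrative and are not taken from the fbus configuration above.

import chisel3._
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.IdRange
import freechips.rocketchip.tilelink._

// Hedged usage sketch of edge.Get: issue one 8-byte read to 0x400 and sink the reply.
class GetOnceClient(implicit p: Parameters) extends LazyModule {
  val node = TLClientNode(Seq(TLMasterPortParameters.v1(
    Seq(TLMasterParameters.v1(name = "getOnce", sourceId = IdRange(0, 1))))))
  lazy val module = new LazyModuleImp(this) {
    val (out, edge) = node.out(0)
    val sent = RegInit(false.B)
    // edge.Get returns (legal, bits); legal is false if no visible manager supports the access
    val (legal, get) = edge.Get(fromSource = 0.U, toAddress = 0x400.U, lgSize = 3.U)
    out.a.valid := !sent && legal
    out.a.bits  := get
    when (out.a.fire) { sent := true.B }
    out.d.ready := true.B // sink the AccessAckData response
  }
}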
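Routing on the A channel is decided by the per-input outputPortFns built in TLXbar.circuit: each output port's AddressSets are widened by the complement of the AddressDecoder routing mask and then tested with contains against the request address. The base/mask convention behind contains is easy to check in plain Scala; the stand-in class below is hypothetical (diplomacy's AddressSet carries much more functionality), and it uses the 0x400-aligned, 1 KiB windows that the TLRAMXbar unit test above assigns to its RAMs.

// Hedged sketch of AddressSet membership as used by the Xbar routing functions:
// `mask` marks the don't-care address bits, `base` fixes the remaining bits.
object AddressRoutingSketch extends App {
  case class AddressSetSketch(base: BigInt, mask: BigInt) {
    def contains(addr: BigInt): Boolean = ((addr ^ base) & ~mask) == 0
  }

  // RAM n in TLRAMXbar owns AddressSet(0x400 * n, 0x3ff), i.e. a 1 KiB window.
  val ram1 = AddressSetSketch(0x400, 0x3ff)
  assert(ram1.contains(0x400) && ram1.contains(0x7ff))   // inside the window
  assert(!ram1.contains(0x3ff) && !ram1.contains(0x800)) // outside the window
  println("address-window checks passed")
}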
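The source-ID remapping in TLXbar.circuit (in(i).a.bits.source := io_in(i).a.bits.source | r.start.U on the way in, trim on the way back) works because TLXbar.assignRanges packs every input port's IDs into a naturally aligned, power-of-two-sized window of a shared namespace; that packing is also why the output port's A-channel source field in the generated module below is wider than any single input's. The self-contained sketch below mirrors assignRanges with a stand-in IdRange and hypothetical endSourceId values to show how the windows come out; it is not the diplomacy implementation itself.

// Hedged, pure-Scala mirror of TLXbar.assignRanges; IdRange is a stand-in for
// the diplomacy class and the example sizes are hypothetical.
object AssignRangesSketch extends App {
  case class IdRange(start: Int, end: Int) // half-open [start, end)

  def log2Ceil(x: Int): Int = 32 - Integer.numberOfLeadingZeros(x - 1)

  def assignRanges(sizes: Seq[Int]): Seq[IdRange] = {
    // Round each port's ID count up to a power of two (0 stays 0)
    val pow2Sizes = sizes.map(z => if (z == 0) 0 else 1 << log2Ceil(z))
    // Sort by increasing size, remembering each port's original position
    val tuples = pow2Sizes.zipWithIndex.sortBy(_._1)
    // Suffix-sum of the sizes gives naturally aligned start offsets
    val starts = tuples.scanRight(0)(_._1 + _).tail
    val ranges = (tuples zip starts).map { case ((sz, i), st) =>
      (if (sz == 0) IdRange(0, 0) else IdRange(st, st + sz), i)
    }
    ranges.sortBy(_._2).map(_._1) // restore the original port order
  }

  // Four hypothetical master ports with endSourceIds 1, 9, 4 and 6:
  println(assignRanges(Seq(1, 9, 4, 6)))
  // -> List(IdRange(28,29), IdRange(0,16), IdRange(24,28), IdRange(16,24))
}

The largest port lands at offset 0 and the smallest at the top, so every window start is a multiple of that window's size; this is what lets requestDOI recover the originating port with a simple masked comparison (IdRange.contains) on d.bits.source.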
module TLXbar_fbus_i4_o1_a32d64s6k4z4u( // @[Xbar.scala:74:9] input clock, // @[Xbar.scala:74:9] input reset, // @[Xbar.scala:74:9] output auto_anon_in_3_a_ready, // @[LazyModuleImp.scala:107:25] input auto_anon_in_3_a_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_in_3_a_bits_opcode, // @[LazyModuleImp.scala:107:25] input [3:0] auto_anon_in_3_a_bits_size, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_in_3_a_bits_source, // @[LazyModuleImp.scala:107:25] input [31:0] auto_anon_in_3_a_bits_address, // @[LazyModuleImp.scala:107:25] input [7:0] auto_anon_in_3_a_bits_mask, // @[LazyModuleImp.scala:107:25] input [63:0] auto_anon_in_3_a_bits_data, // @[LazyModuleImp.scala:107:25] input auto_anon_in_3_d_ready, // @[LazyModuleImp.scala:107:25] output auto_anon_in_3_d_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_in_3_d_bits_opcode, // @[LazyModuleImp.scala:107:25] output [1:0] auto_anon_in_3_d_bits_param, // @[LazyModuleImp.scala:107:25] output [3:0] auto_anon_in_3_d_bits_size, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_in_3_d_bits_source, // @[LazyModuleImp.scala:107:25] output [3:0] auto_anon_in_3_d_bits_sink, // @[LazyModuleImp.scala:107:25] output auto_anon_in_3_d_bits_denied, // @[LazyModuleImp.scala:107:25] output [63:0] auto_anon_in_3_d_bits_data, // @[LazyModuleImp.scala:107:25] output auto_anon_in_3_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_anon_in_2_a_ready, // @[LazyModuleImp.scala:107:25] input auto_anon_in_2_a_valid, // @[LazyModuleImp.scala:107:25] input [3:0] auto_anon_in_2_a_bits_size, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_in_2_a_bits_source, // @[LazyModuleImp.scala:107:25] input [31:0] auto_anon_in_2_a_bits_address, // @[LazyModuleImp.scala:107:25] input [7:0] auto_anon_in_2_a_bits_mask, // @[LazyModuleImp.scala:107:25] output auto_anon_in_2_d_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_in_2_d_bits_opcode, // @[LazyModuleImp.scala:107:25] output [1:0] auto_anon_in_2_d_bits_param, // @[LazyModuleImp.scala:107:25] output [3:0] auto_anon_in_2_d_bits_size, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_in_2_d_bits_source, // @[LazyModuleImp.scala:107:25] output [3:0] auto_anon_in_2_d_bits_sink, // @[LazyModuleImp.scala:107:25] output auto_anon_in_2_d_bits_denied, // @[LazyModuleImp.scala:107:25] output [63:0] auto_anon_in_2_d_bits_data, // @[LazyModuleImp.scala:107:25] output auto_anon_in_2_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_anon_in_1_a_ready, // @[LazyModuleImp.scala:107:25] input auto_anon_in_1_a_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_in_1_a_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_in_1_a_bits_param, // @[LazyModuleImp.scala:107:25] input [3:0] auto_anon_in_1_a_bits_size, // @[LazyModuleImp.scala:107:25] input [3:0] auto_anon_in_1_a_bits_source, // @[LazyModuleImp.scala:107:25] input [31:0] auto_anon_in_1_a_bits_address, // @[LazyModuleImp.scala:107:25] input [7:0] auto_anon_in_1_a_bits_mask, // @[LazyModuleImp.scala:107:25] input [63:0] auto_anon_in_1_a_bits_data, // @[LazyModuleImp.scala:107:25] input auto_anon_in_1_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_anon_in_1_d_ready, // @[LazyModuleImp.scala:107:25] output auto_anon_in_1_d_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_in_1_d_bits_opcode, // @[LazyModuleImp.scala:107:25] output [1:0] auto_anon_in_1_d_bits_param, // @[LazyModuleImp.scala:107:25] output [3:0] auto_anon_in_1_d_bits_size, // 
@[LazyModuleImp.scala:107:25] output [3:0] auto_anon_in_1_d_bits_source, // @[LazyModuleImp.scala:107:25] output [3:0] auto_anon_in_1_d_bits_sink, // @[LazyModuleImp.scala:107:25] output auto_anon_in_1_d_bits_denied, // @[LazyModuleImp.scala:107:25] output [63:0] auto_anon_in_1_d_bits_data, // @[LazyModuleImp.scala:107:25] output auto_anon_in_1_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_anon_in_0_a_ready, // @[LazyModuleImp.scala:107:25] input auto_anon_in_0_a_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_in_0_a_bits_opcode, // @[LazyModuleImp.scala:107:25] input [3:0] auto_anon_in_0_a_bits_size, // @[LazyModuleImp.scala:107:25] input [31:0] auto_anon_in_0_a_bits_address, // @[LazyModuleImp.scala:107:25] input [7:0] auto_anon_in_0_a_bits_mask, // @[LazyModuleImp.scala:107:25] input [63:0] auto_anon_in_0_a_bits_data, // @[LazyModuleImp.scala:107:25] input auto_anon_in_0_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_anon_in_0_d_ready, // @[LazyModuleImp.scala:107:25] output auto_anon_in_0_d_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_in_0_d_bits_opcode, // @[LazyModuleImp.scala:107:25] output [1:0] auto_anon_in_0_d_bits_param, // @[LazyModuleImp.scala:107:25] output [3:0] auto_anon_in_0_d_bits_size, // @[LazyModuleImp.scala:107:25] output [3:0] auto_anon_in_0_d_bits_sink, // @[LazyModuleImp.scala:107:25] output auto_anon_in_0_d_bits_denied, // @[LazyModuleImp.scala:107:25] output [63:0] auto_anon_in_0_d_bits_data, // @[LazyModuleImp.scala:107:25] output auto_anon_in_0_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_anon_out_a_ready, // @[LazyModuleImp.scala:107:25] output auto_anon_out_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_a_bits_param, // @[LazyModuleImp.scala:107:25] output [3:0] auto_anon_out_a_bits_size, // @[LazyModuleImp.scala:107:25] output [5:0] auto_anon_out_a_bits_source, // @[LazyModuleImp.scala:107:25] output [31:0] auto_anon_out_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_anon_out_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_anon_out_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_anon_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_anon_out_d_ready, // @[LazyModuleImp.scala:107:25] input auto_anon_out_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [1:0] auto_anon_out_d_bits_param, // @[LazyModuleImp.scala:107:25] input [3:0] auto_anon_out_d_bits_size, // @[LazyModuleImp.scala:107:25] input [5:0] auto_anon_out_d_bits_source, // @[LazyModuleImp.scala:107:25] input [3:0] auto_anon_out_d_bits_sink, // @[LazyModuleImp.scala:107:25] input auto_anon_out_d_bits_denied, // @[LazyModuleImp.scala:107:25] input [63:0] auto_anon_out_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_anon_out_d_bits_corrupt // @[LazyModuleImp.scala:107:25] ); wire [3:0] out_0_d_bits_sink; // @[Xbar.scala:216:19] wire [5:0] in_3_a_bits_source; // @[Xbar.scala:159:18] wire [5:0] in_2_a_bits_source; // @[Xbar.scala:159:18] wire [5:0] in_1_a_bits_source; // @[Xbar.scala:159:18] wire auto_anon_in_3_a_valid_0 = auto_anon_in_3_a_valid; // @[Xbar.scala:74:9] wire [2:0] auto_anon_in_3_a_bits_opcode_0 = auto_anon_in_3_a_bits_opcode; // @[Xbar.scala:74:9] wire [3:0] auto_anon_in_3_a_bits_size_0 = auto_anon_in_3_a_bits_size; // @[Xbar.scala:74:9] wire [2:0] auto_anon_in_3_a_bits_source_0 = 
auto_anon_in_3_a_bits_source; // @[Xbar.scala:74:9] wire [31:0] auto_anon_in_3_a_bits_address_0 = auto_anon_in_3_a_bits_address; // @[Xbar.scala:74:9] wire [7:0] auto_anon_in_3_a_bits_mask_0 = auto_anon_in_3_a_bits_mask; // @[Xbar.scala:74:9] wire [63:0] auto_anon_in_3_a_bits_data_0 = auto_anon_in_3_a_bits_data; // @[Xbar.scala:74:9] wire auto_anon_in_3_d_ready_0 = auto_anon_in_3_d_ready; // @[Xbar.scala:74:9] wire auto_anon_in_2_a_valid_0 = auto_anon_in_2_a_valid; // @[Xbar.scala:74:9] wire [3:0] auto_anon_in_2_a_bits_size_0 = auto_anon_in_2_a_bits_size; // @[Xbar.scala:74:9] wire [2:0] auto_anon_in_2_a_bits_source_0 = auto_anon_in_2_a_bits_source; // @[Xbar.scala:74:9] wire [31:0] auto_anon_in_2_a_bits_address_0 = auto_anon_in_2_a_bits_address; // @[Xbar.scala:74:9] wire [7:0] auto_anon_in_2_a_bits_mask_0 = auto_anon_in_2_a_bits_mask; // @[Xbar.scala:74:9] wire auto_anon_in_1_a_valid_0 = auto_anon_in_1_a_valid; // @[Xbar.scala:74:9] wire [2:0] auto_anon_in_1_a_bits_opcode_0 = auto_anon_in_1_a_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] auto_anon_in_1_a_bits_param_0 = auto_anon_in_1_a_bits_param; // @[Xbar.scala:74:9] wire [3:0] auto_anon_in_1_a_bits_size_0 = auto_anon_in_1_a_bits_size; // @[Xbar.scala:74:9] wire [3:0] auto_anon_in_1_a_bits_source_0 = auto_anon_in_1_a_bits_source; // @[Xbar.scala:74:9] wire [31:0] auto_anon_in_1_a_bits_address_0 = auto_anon_in_1_a_bits_address; // @[Xbar.scala:74:9] wire [7:0] auto_anon_in_1_a_bits_mask_0 = auto_anon_in_1_a_bits_mask; // @[Xbar.scala:74:9] wire [63:0] auto_anon_in_1_a_bits_data_0 = auto_anon_in_1_a_bits_data; // @[Xbar.scala:74:9] wire auto_anon_in_1_a_bits_corrupt_0 = auto_anon_in_1_a_bits_corrupt; // @[Xbar.scala:74:9] wire auto_anon_in_1_d_ready_0 = auto_anon_in_1_d_ready; // @[Xbar.scala:74:9] wire auto_anon_in_0_a_valid_0 = auto_anon_in_0_a_valid; // @[Xbar.scala:74:9] wire [2:0] auto_anon_in_0_a_bits_opcode_0 = auto_anon_in_0_a_bits_opcode; // @[Xbar.scala:74:9] wire [3:0] auto_anon_in_0_a_bits_size_0 = auto_anon_in_0_a_bits_size; // @[Xbar.scala:74:9] wire [31:0] auto_anon_in_0_a_bits_address_0 = auto_anon_in_0_a_bits_address; // @[Xbar.scala:74:9] wire [7:0] auto_anon_in_0_a_bits_mask_0 = auto_anon_in_0_a_bits_mask; // @[Xbar.scala:74:9] wire [63:0] auto_anon_in_0_a_bits_data_0 = auto_anon_in_0_a_bits_data; // @[Xbar.scala:74:9] wire auto_anon_in_0_a_bits_corrupt_0 = auto_anon_in_0_a_bits_corrupt; // @[Xbar.scala:74:9] wire auto_anon_in_0_d_ready_0 = auto_anon_in_0_d_ready; // @[Xbar.scala:74:9] wire auto_anon_out_a_ready_0 = auto_anon_out_a_ready; // @[Xbar.scala:74:9] wire auto_anon_out_d_valid_0 = auto_anon_out_d_valid; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_d_bits_opcode_0 = auto_anon_out_d_bits_opcode; // @[Xbar.scala:74:9] wire [1:0] auto_anon_out_d_bits_param_0 = auto_anon_out_d_bits_param; // @[Xbar.scala:74:9] wire [3:0] auto_anon_out_d_bits_size_0 = auto_anon_out_d_bits_size; // @[Xbar.scala:74:9] wire [5:0] auto_anon_out_d_bits_source_0 = auto_anon_out_d_bits_source; // @[Xbar.scala:74:9] wire [3:0] auto_anon_out_d_bits_sink_0 = auto_anon_out_d_bits_sink; // @[Xbar.scala:74:9] wire auto_anon_out_d_bits_denied_0 = auto_anon_out_d_bits_denied; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_d_bits_data_0 = auto_anon_out_d_bits_data; // @[Xbar.scala:74:9] wire auto_anon_out_d_bits_corrupt_0 = auto_anon_out_d_bits_corrupt; // @[Xbar.scala:74:9] wire _readys_T_2 = reset; // @[Arbiter.scala:22:12] wire [2:0] auto_anon_in_3_a_bits_param = 3'h0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_in_2_a_bits_param = 3'h0; // 
@[Xbar.scala:74:9] wire [2:0] auto_anon_in_0_a_bits_param = 3'h0; // @[Xbar.scala:74:9] wire [2:0] anonIn_a_bits_param = 3'h0; // @[MixedNode.scala:551:17] wire [2:0] anonIn_2_a_bits_param = 3'h0; // @[MixedNode.scala:551:17] wire [2:0] anonIn_3_a_bits_param = 3'h0; // @[MixedNode.scala:551:17] wire [2:0] in_0_a_bits_param = 3'h0; // @[Xbar.scala:159:18] wire [2:0] in_2_a_bits_param = 3'h0; // @[Xbar.scala:159:18] wire [2:0] in_3_a_bits_param = 3'h0; // @[Xbar.scala:159:18] wire [2:0] _addressC_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _addressC_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _addressC_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _addressC_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _addressC_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _addressC_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _addressC_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _addressC_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _addressC_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _addressC_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _addressC_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _addressC_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _addressC_WIRE_6_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _addressC_WIRE_6_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _addressC_WIRE_7_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _addressC_WIRE_7_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _requestBOI_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _requestBOI_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] _requestBOI_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _requestBOI_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] _requestBOI_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _requestBOI_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] requestBOI_uncommonBits_1 = 3'h0; // @[Parameters.scala:52:56] wire [2:0] _requestBOI_T_5 = 3'h0; // @[Parameters.scala:54:10] wire [2:0] _requestBOI_WIRE_6_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _requestBOI_WIRE_7_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] requestBOI_uncommonBits_2 = 3'h0; // @[Parameters.scala:52:56] wire [2:0] _requestBOI_T_10 = 3'h0; // @[Parameters.scala:54:10] wire [2:0] _beatsBO_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _beatsBO_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] _beatsCI_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _beatsCI_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _beatsCI_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _beatsCI_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _beatsCI_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _beatsCI_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _beatsCI_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _beatsCI_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _beatsCI_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _beatsCI_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _beatsCI_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _beatsCI_WIRE_5_bits_param = 3'h0; 
// @[Bundles.scala:265:61] wire [2:0] _beatsCI_WIRE_6_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _beatsCI_WIRE_6_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _beatsCI_WIRE_7_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _beatsCI_WIRE_7_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] portsAOI_filtered_0_bits_param = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsAOI_filtered_2_0_bits_param = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsAOI_filtered_3_0_bits_param = 3'h0; // @[Xbar.scala:352:24] wire [2:0] _portsBIO_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _portsBIO_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] portsBIO_filtered_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsBIO_filtered_1_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsBIO_filtered_2_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsBIO_filtered_3_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] _portsCOI_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _portsCOI_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _portsCOI_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _portsCOI_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] portsCOI_filtered_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_0_bits_param = 3'h0; // @[Xbar.scala:352:24] wire [2:0] _portsCOI_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _portsCOI_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _portsCOI_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _portsCOI_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] portsCOI_filtered_1_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_1_0_bits_param = 3'h0; // @[Xbar.scala:352:24] wire [2:0] _portsCOI_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _portsCOI_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _portsCOI_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _portsCOI_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] portsCOI_filtered_2_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_2_0_bits_param = 3'h0; // @[Xbar.scala:352:24] wire [2:0] _portsCOI_WIRE_6_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _portsCOI_WIRE_6_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _portsCOI_WIRE_7_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _portsCOI_WIRE_7_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] portsCOI_filtered_3_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_3_0_bits_param = 3'h0; // @[Xbar.scala:352:24] wire [2:0] _out_0_a_bits_T_42 = 3'h0; // @[Mux.scala:30:73] wire [2:0] _out_0_a_bits_T_44 = 3'h0; // @[Mux.scala:30:73] wire [2:0] _out_0_a_bits_T_45 = 3'h0; // @[Mux.scala:30:73] wire auto_anon_in_3_a_bits_corrupt = 1'h0; // @[Xbar.scala:74:9] wire auto_anon_in_2_a_bits_corrupt = 1'h0; // @[Xbar.scala:74:9] wire auto_anon_in_0_a_bits_source = 1'h0; // @[Xbar.scala:74:9] wire auto_anon_in_0_d_bits_source = 1'h0; // @[Xbar.scala:74:9] wire anonIn_a_bits_source = 1'h0; // @[MixedNode.scala:551:17] wire anonIn_d_bits_source = 1'h0; // @[MixedNode.scala:551:17] wire anonIn_2_a_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17] wire anonIn_3_a_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17] wire in_2_a_bits_corrupt = 1'h0; // @[Xbar.scala:159:18] wire 
in_3_a_bits_corrupt = 1'h0; // @[Xbar.scala:159:18] wire _addressC_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _addressC_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _addressC_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _addressC_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _addressC_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _addressC_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _addressC_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _addressC_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _addressC_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _addressC_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _addressC_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _addressC_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _addressC_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74] wire _addressC_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74] wire _addressC_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _addressC_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61] wire _addressC_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61] wire _addressC_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _addressC_WIRE_6_ready = 1'h0; // @[Bundles.scala:265:74] wire _addressC_WIRE_6_valid = 1'h0; // @[Bundles.scala:265:74] wire _addressC_WIRE_6_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _addressC_WIRE_7_ready = 1'h0; // @[Bundles.scala:265:61] wire _addressC_WIRE_7_valid = 1'h0; // @[Bundles.scala:265:61] wire _addressC_WIRE_7_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _requestBOI_WIRE_ready = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_valid = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_1_ready = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_1_valid = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire requestBOI_0_0 = 1'h0; // @[Parameters.scala:46:9] wire _requestBOI_WIRE_2_ready = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_2_valid = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_3_ready = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_3_valid = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_4_ready = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_4_valid = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_5_ready = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_5_valid = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_T_6 = 1'h0; // @[Parameters.scala:54:32] wire _requestBOI_T_8 = 1'h0; // @[Parameters.scala:54:67] wire requestBOI_0_2 = 1'h0; // @[Parameters.scala:56:48] wire _requestBOI_WIRE_6_ready = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_6_valid = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_6_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_7_ready = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_7_valid = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_7_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_T_11 = 1'h0; // @[Parameters.scala:54:32] wire _requestBOI_T_13 = 1'h0; // 
@[Parameters.scala:54:67] wire requestBOI_0_3 = 1'h0; // @[Parameters.scala:56:48] wire _requestEIO_WIRE_ready = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_valid = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_1_ready = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_1_valid = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_T = 1'h0; // @[Parameters.scala:54:10] wire _requestEIO_WIRE_2_ready = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_2_valid = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_3_ready = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_3_valid = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_T_5 = 1'h0; // @[Parameters.scala:54:10] wire _requestEIO_WIRE_4_ready = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_4_valid = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_5_ready = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_5_valid = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_T_10 = 1'h0; // @[Parameters.scala:54:10] wire _requestEIO_WIRE_6_ready = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_6_valid = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_7_ready = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_7_valid = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_T_15 = 1'h0; // @[Parameters.scala:54:10] wire beatsAI_opdata_2 = 1'h0; // @[Edges.scala:92:28] wire _beatsBO_WIRE_ready = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_valid = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_1_ready = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_1_valid = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_opdata_T = 1'h0; // @[Edges.scala:97:37] wire _beatsCI_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _beatsCI_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _beatsCI_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _beatsCI_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _beatsCI_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _beatsCI_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire beatsCI_opdata = 1'h0; // @[Edges.scala:102:36] wire _beatsCI_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _beatsCI_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _beatsCI_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _beatsCI_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _beatsCI_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _beatsCI_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire beatsCI_opdata_1 = 1'h0; // @[Edges.scala:102:36] wire _beatsCI_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74] wire _beatsCI_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74] wire _beatsCI_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _beatsCI_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61] wire _beatsCI_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61] wire _beatsCI_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire beatsCI_opdata_2 = 1'h0; // @[Edges.scala:102:36] wire _beatsCI_WIRE_6_ready = 1'h0; // @[Bundles.scala:265:74] wire _beatsCI_WIRE_6_valid = 1'h0; // @[Bundles.scala:265:74] wire _beatsCI_WIRE_6_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _beatsCI_WIRE_7_ready = 1'h0; // @[Bundles.scala:265:61] wire _beatsCI_WIRE_7_valid = 1'h0; // @[Bundles.scala:265:61] wire _beatsCI_WIRE_7_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire 
wire _beatsEI_WIRE_ready = 1'h0; // @[Bundles.scala:267:74]
wire _beatsEI_WIRE_valid = 1'h0; // @[Bundles.scala:267:74]
wire _beatsEI_WIRE_1_ready = 1'h0; // @[Bundles.scala:267:61]
wire _beatsEI_WIRE_1_valid = 1'h0; // @[Bundles.scala:267:61]
wire _beatsEI_WIRE_2_ready = 1'h0; // @[Bundles.scala:267:74]
wire _beatsEI_WIRE_2_valid = 1'h0; // @[Bundles.scala:267:74]
wire _beatsEI_WIRE_3_ready = 1'h0; // @[Bundles.scala:267:61]
wire _beatsEI_WIRE_3_valid = 1'h0; // @[Bundles.scala:267:61]
wire _beatsEI_WIRE_4_ready = 1'h0; // @[Bundles.scala:267:74]
wire _beatsEI_WIRE_4_valid = 1'h0; // @[Bundles.scala:267:74]
wire _beatsEI_WIRE_5_ready = 1'h0; // @[Bundles.scala:267:61]
wire _beatsEI_WIRE_5_valid = 1'h0; // @[Bundles.scala:267:61]
wire _beatsEI_WIRE_6_ready = 1'h0; // @[Bundles.scala:267:74]
wire _beatsEI_WIRE_6_valid = 1'h0; // @[Bundles.scala:267:74]
wire _beatsEI_WIRE_7_ready = 1'h0; // @[Bundles.scala:267:61]
wire _beatsEI_WIRE_7_valid = 1'h0; // @[Bundles.scala:267:61]
wire portsAOI_filtered_2_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24]
wire portsAOI_filtered_3_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24]
wire _portsBIO_WIRE_ready = 1'h0; // @[Bundles.scala:264:74]
wire _portsBIO_WIRE_valid = 1'h0; // @[Bundles.scala:264:74]
wire _portsBIO_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:264:74]
wire _portsBIO_WIRE_1_ready = 1'h0; // @[Bundles.scala:264:61]
wire _portsBIO_WIRE_1_valid = 1'h0; // @[Bundles.scala:264:61]
wire _portsBIO_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:264:61]
wire portsBIO_filtered_0_ready = 1'h0; // @[Xbar.scala:352:24]
wire portsBIO_filtered_0_valid = 1'h0; // @[Xbar.scala:352:24]
wire portsBIO_filtered_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24]
wire portsBIO_filtered_1_ready = 1'h0; // @[Xbar.scala:352:24]
wire portsBIO_filtered_1_valid = 1'h0; // @[Xbar.scala:352:24]
wire portsBIO_filtered_1_bits_corrupt = 1'h0; // @[Xbar.scala:352:24]
wire portsBIO_filtered_2_ready = 1'h0; // @[Xbar.scala:352:24]
wire portsBIO_filtered_2_valid = 1'h0; // @[Xbar.scala:352:24]
wire portsBIO_filtered_2_bits_corrupt = 1'h0; // @[Xbar.scala:352:24]
wire portsBIO_filtered_3_ready = 1'h0; // @[Xbar.scala:352:24]
wire portsBIO_filtered_3_valid = 1'h0; // @[Xbar.scala:352:24]
wire portsBIO_filtered_3_bits_corrupt = 1'h0; // @[Xbar.scala:352:24]
wire _portsBIO_filtered_0_valid_T = 1'h0; // @[Xbar.scala:355:54]
wire _portsBIO_filtered_0_valid_T_1 = 1'h0; // @[Xbar.scala:355:40]
wire _portsBIO_filtered_1_valid_T_1 = 1'h0; // @[Xbar.scala:355:40]
wire _portsBIO_filtered_2_valid_T = 1'h0; // @[Xbar.scala:355:54]
wire _portsBIO_filtered_2_valid_T_1 = 1'h0; // @[Xbar.scala:355:40]
wire _portsBIO_filtered_3_valid_T = 1'h0; // @[Xbar.scala:355:54]
wire _portsBIO_filtered_3_valid_T_1 = 1'h0; // @[Xbar.scala:355:40]
wire _portsBIO_T = 1'h0; // @[Mux.scala:30:73]
wire _portsBIO_T_1 = 1'h0; // @[Mux.scala:30:73]
wire _portsBIO_T_2 = 1'h0; // @[Mux.scala:30:73]
wire _portsBIO_T_3 = 1'h0; // @[Mux.scala:30:73]
wire _portsBIO_T_4 = 1'h0; // @[Mux.scala:30:73]
wire _portsBIO_T_5 = 1'h0; // @[Mux.scala:30:73]
wire _portsBIO_T_6 = 1'h0; // @[Mux.scala:30:73]
wire _portsBIO_WIRE_2 = 1'h0; // @[Mux.scala:30:73]
wire _portsCOI_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _portsCOI_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _portsCOI_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _portsCOI_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _portsCOI_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _portsCOI_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire portsCOI_filtered_0_ready = 1'h0; // @[Xbar.scala:352:24]
wire portsCOI_filtered_0_valid = 1'h0; // @[Xbar.scala:352:24]
wire portsCOI_filtered_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24]
wire _portsCOI_filtered_0_valid_T_1 = 1'h0; // @[Xbar.scala:355:40]
wire _portsCOI_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _portsCOI_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _portsCOI_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _portsCOI_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _portsCOI_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _portsCOI_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire portsCOI_filtered_1_0_ready = 1'h0; // @[Xbar.scala:352:24]
wire portsCOI_filtered_1_0_valid = 1'h0; // @[Xbar.scala:352:24]
wire portsCOI_filtered_1_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24]
wire _portsCOI_filtered_0_valid_T_3 = 1'h0; // @[Xbar.scala:355:40]
wire _portsCOI_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _portsCOI_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _portsCOI_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _portsCOI_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _portsCOI_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _portsCOI_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire portsCOI_filtered_2_0_ready = 1'h0; // @[Xbar.scala:352:24]
wire portsCOI_filtered_2_0_valid = 1'h0; // @[Xbar.scala:352:24]
wire portsCOI_filtered_2_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24]
wire _portsCOI_filtered_0_valid_T_5 = 1'h0; // @[Xbar.scala:355:40]
wire _portsCOI_WIRE_6_ready = 1'h0; // @[Bundles.scala:265:74]
wire _portsCOI_WIRE_6_valid = 1'h0; // @[Bundles.scala:265:74]
wire _portsCOI_WIRE_6_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _portsCOI_WIRE_7_ready = 1'h0; // @[Bundles.scala:265:61]
wire _portsCOI_WIRE_7_valid = 1'h0; // @[Bundles.scala:265:61]
wire _portsCOI_WIRE_7_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire portsCOI_filtered_3_0_ready = 1'h0; // @[Xbar.scala:352:24]
wire portsCOI_filtered_3_0_valid = 1'h0; // @[Xbar.scala:352:24]
wire portsCOI_filtered_3_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24]
wire _portsCOI_filtered_0_valid_T_7 = 1'h0; // @[Xbar.scala:355:40]
wire _portsEOI_WIRE_ready = 1'h0; // @[Bundles.scala:267:74]
wire _portsEOI_WIRE_valid = 1'h0; // @[Bundles.scala:267:74]
wire _portsEOI_WIRE_1_ready = 1'h0; // @[Bundles.scala:267:61]
wire _portsEOI_WIRE_1_valid = 1'h0; // @[Bundles.scala:267:61]
wire portsEOI_filtered_0_ready = 1'h0; // @[Xbar.scala:352:24]
wire portsEOI_filtered_0_valid = 1'h0; // @[Xbar.scala:352:24]
wire _portsEOI_filtered_0_valid_T_1 = 1'h0; // @[Xbar.scala:355:40]
wire _portsEOI_WIRE_2_ready = 1'h0; // @[Bundles.scala:267:74]
wire _portsEOI_WIRE_2_valid = 1'h0; // @[Bundles.scala:267:74]
wire _portsEOI_WIRE_3_ready = 1'h0; // @[Bundles.scala:267:61]
wire _portsEOI_WIRE_3_valid = 1'h0; // @[Bundles.scala:267:61]
wire portsEOI_filtered_1_0_ready = 1'h0; // @[Xbar.scala:352:24]
wire portsEOI_filtered_1_0_valid = 1'h0; // @[Xbar.scala:352:24]
wire _portsEOI_filtered_0_valid_T_3 = 1'h0; // @[Xbar.scala:355:40]
wire _portsEOI_WIRE_4_ready = 1'h0; // @[Bundles.scala:267:74]
wire _portsEOI_WIRE_4_valid = 1'h0; // @[Bundles.scala:267:74]
wire _portsEOI_WIRE_5_ready = 1'h0; // @[Bundles.scala:267:61]
wire _portsEOI_WIRE_5_valid = 1'h0; // @[Bundles.scala:267:61]
wire portsEOI_filtered_2_0_ready = 1'h0; // @[Xbar.scala:352:24]
wire portsEOI_filtered_2_0_valid = 1'h0; // @[Xbar.scala:352:24]
wire _portsEOI_filtered_0_valid_T_5 = 1'h0; // @[Xbar.scala:355:40]
wire _portsEOI_WIRE_6_ready = 1'h0; // @[Bundles.scala:267:74]
wire _portsEOI_WIRE_6_valid = 1'h0; // @[Bundles.scala:267:74]
wire _portsEOI_WIRE_7_ready = 1'h0; // @[Bundles.scala:267:61]
wire _portsEOI_WIRE_7_valid = 1'h0; // @[Bundles.scala:267:61]
wire portsEOI_filtered_3_0_ready = 1'h0; // @[Xbar.scala:352:24]
wire portsEOI_filtered_3_0_valid = 1'h0; // @[Xbar.scala:352:24]
wire _portsEOI_filtered_0_valid_T_7 = 1'h0; // @[Xbar.scala:355:40]
wire _state_WIRE_0 = 1'h0; // @[Arbiter.scala:88:34]
wire _state_WIRE_1 = 1'h0; // @[Arbiter.scala:88:34]
wire _state_WIRE_2 = 1'h0; // @[Arbiter.scala:88:34]
wire _state_WIRE_3 = 1'h0; // @[Arbiter.scala:88:34]
wire _out_0_a_bits_T_2 = 1'h0; // @[Mux.scala:30:73]
wire _out_0_a_bits_T_3 = 1'h0; // @[Mux.scala:30:73]
wire [2:0] auto_anon_in_2_a_bits_opcode = 3'h4; // @[Xbar.scala:74:9]
wire [2:0] anonIn_2_a_bits_opcode = 3'h4; // @[MixedNode.scala:551:17]
wire [2:0] in_2_a_bits_opcode = 3'h4; // @[Xbar.scala:159:18]
wire [2:0] portsAOI_filtered_2_0_bits_opcode = 3'h4; // @[Xbar.scala:352:24]
wire [63:0] auto_anon_in_2_a_bits_data = 64'h0; // @[Xbar.scala:74:9]
wire [63:0] anonIn_2_a_bits_data = 64'h0; // @[MixedNode.scala:551:17]
wire [63:0] in_2_a_bits_data = 64'h0; // @[Xbar.scala:159:18]
wire [63:0] _addressC_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _addressC_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _addressC_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _addressC_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _addressC_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _addressC_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _addressC_WIRE_6_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _addressC_WIRE_7_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _requestBOI_WIRE_bits_data = 64'h0; // @[Bundles.scala:264:74]
wire [63:0] _requestBOI_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:264:61]
wire [63:0] _requestBOI_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:264:74]
wire [63:0] _requestBOI_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:264:61]
wire [63:0] _requestBOI_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:264:74]
wire [63:0] _requestBOI_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:264:61]
wire [63:0] _requestBOI_WIRE_6_bits_data = 64'h0; // @[Bundles.scala:264:74]
wire [63:0] _requestBOI_WIRE_7_bits_data = 64'h0; // @[Bundles.scala:264:61]
wire [63:0] _beatsBO_WIRE_bits_data = 64'h0; // @[Bundles.scala:264:74]
wire [63:0] _beatsBO_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:264:61]
wire [63:0] _beatsCI_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _beatsCI_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _beatsCI_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _beatsCI_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _beatsCI_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _beatsCI_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _beatsCI_WIRE_6_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _beatsCI_WIRE_7_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] portsAOI_filtered_2_0_bits_data = 64'h0; // @[Xbar.scala:352:24]
wire [63:0] _portsBIO_WIRE_bits_data = 64'h0; // @[Bundles.scala:264:74]
wire [63:0] _portsBIO_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:264:61]
wire [63:0] portsBIO_filtered_0_bits_data = 64'h0; // @[Xbar.scala:352:24]
wire [63:0] portsBIO_filtered_1_bits_data = 64'h0; // @[Xbar.scala:352:24]
wire [63:0] portsBIO_filtered_2_bits_data = 64'h0; // @[Xbar.scala:352:24]
wire [63:0] portsBIO_filtered_3_bits_data = 64'h0; // @[Xbar.scala:352:24]
wire [63:0] _portsCOI_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _portsCOI_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] portsCOI_filtered_0_bits_data = 64'h0; // @[Xbar.scala:352:24]
wire [63:0] _portsCOI_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _portsCOI_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] portsCOI_filtered_1_0_bits_data = 64'h0; // @[Xbar.scala:352:24]
wire [63:0] _portsCOI_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _portsCOI_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] portsCOI_filtered_2_0_bits_data = 64'h0; // @[Xbar.scala:352:24]
wire [63:0] _portsCOI_WIRE_6_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _portsCOI_WIRE_7_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] portsCOI_filtered_3_0_bits_data = 64'h0; // @[Xbar.scala:352:24]
wire [63:0] _out_0_a_bits_T_9 = 64'h0; // @[Mux.scala:30:73]
wire auto_anon_in_2_d_ready = 1'h1; // @[Xbar.scala:74:9]
wire anonIn_2_d_ready = 1'h1; // @[MixedNode.scala:551:17]
wire in_2_d_ready = 1'h1; // @[Xbar.scala:159:18]
wire _requestAIO_T_4 = 1'h1; // @[Parameters.scala:137:59]
wire requestAIO_0_0 = 1'h1; // @[Xbar.scala:307:107]
wire _requestAIO_T_9 = 1'h1; // @[Parameters.scala:137:59]
wire requestAIO_1_0 = 1'h1; // @[Xbar.scala:307:107]
wire _requestAIO_T_14 = 1'h1; // @[Parameters.scala:137:59]
wire requestAIO_2_0 = 1'h1; // @[Xbar.scala:307:107]
wire _requestAIO_T_19 = 1'h1; // @[Parameters.scala:137:59]
wire requestAIO_3_0 = 1'h1; // @[Xbar.scala:307:107]
wire _requestCIO_T_4 = 1'h1; // @[Parameters.scala:137:59]
wire requestCIO_0_0 = 1'h1; // @[Xbar.scala:308:107]
wire _requestCIO_T_9 = 1'h1; // @[Parameters.scala:137:59]
wire requestCIO_1_0 = 1'h1; // @[Xbar.scala:308:107]
wire _requestCIO_T_14 = 1'h1; // @[Parameters.scala:137:59]
wire requestCIO_2_0 = 1'h1; // @[Xbar.scala:308:107]
wire _requestCIO_T_19 = 1'h1; // @[Parameters.scala:137:59]
wire requestCIO_3_0 = 1'h1; // @[Xbar.scala:308:107]
wire _requestBOI_T_1 = 1'h1; // @[Parameters.scala:54:32]
wire _requestBOI_T_2 = 1'h1; // @[Parameters.scala:56:32]
wire _requestBOI_T_3 = 1'h1; // @[Parameters.scala:54:67]
wire _requestBOI_T_4 = 1'h1; // @[Parameters.scala:57:20]
wire requestBOI_0_1 = 1'h1; // @[Parameters.scala:56:48]
wire _requestBOI_T_7 = 1'h1; // @[Parameters.scala:56:32]
wire _requestBOI_T_9 = 1'h1; // @[Parameters.scala:57:20]
wire _requestBOI_T_12 = 1'h1; // @[Parameters.scala:56:32]
wire _requestBOI_T_14 = 1'h1; // @[Parameters.scala:57:20]
wire _requestDOI_T_2 = 1'h1; // @[Parameters.scala:56:32]
wire _requestDOI_T_4 = 1'h1; // @[Parameters.scala:57:20]
wire _requestDOI_T_7 = 1'h1; // @[Parameters.scala:56:32]
wire _requestDOI_T_9 = 1'h1; // @[Parameters.scala:57:20]
wire _requestDOI_T_12 = 1'h1; // @[Parameters.scala:56:32]
wire _requestDOI_T_14 = 1'h1; // @[Parameters.scala:57:20]
wire _requestEIO_T_1 = 1'h1; // @[Parameters.scala:54:32]
wire _requestEIO_T_2 = 1'h1; // @[Parameters.scala:56:32]
wire _requestEIO_T_3 = 1'h1; // @[Parameters.scala:54:67]
wire _requestEIO_T_4 = 1'h1; // @[Parameters.scala:57:20]
wire requestEIO_0_0 = 1'h1; // @[Parameters.scala:56:48]
wire _requestEIO_T_6 = 1'h1; // @[Parameters.scala:54:32]
wire _requestEIO_T_7 = 1'h1; // @[Parameters.scala:56:32]
wire _requestEIO_T_8 = 1'h1; // @[Parameters.scala:54:67]
wire _requestEIO_T_9 = 1'h1; // @[Parameters.scala:57:20]
wire requestEIO_1_0 = 1'h1; // @[Parameters.scala:56:48]
wire _requestEIO_T_11 = 1'h1; // @[Parameters.scala:54:32]
wire _requestEIO_T_12 = 1'h1; // @[Parameters.scala:56:32]
wire _requestEIO_T_13 = 1'h1; // @[Parameters.scala:54:67]
wire _requestEIO_T_14 = 1'h1; // @[Parameters.scala:57:20]
wire requestEIO_2_0 = 1'h1; // @[Parameters.scala:56:48]
wire _requestEIO_T_16 = 1'h1; // @[Parameters.scala:54:32]
wire _requestEIO_T_17 = 1'h1; // @[Parameters.scala:56:32]
wire _requestEIO_T_18 = 1'h1; // @[Parameters.scala:54:67]
wire _requestEIO_T_19 = 1'h1; // @[Parameters.scala:57:20]
wire requestEIO_3_0 = 1'h1; // @[Parameters.scala:56:48]
wire _beatsAI_opdata_T_2 = 1'h1; // @[Edges.scala:92:37]
wire beatsBO_opdata = 1'h1; // @[Edges.scala:97:28]
wire _portsAOI_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54]
wire _portsAOI_filtered_0_valid_T_2 = 1'h1; // @[Xbar.scala:355:54]
wire _portsAOI_filtered_0_valid_T_4 = 1'h1; // @[Xbar.scala:355:54]
wire _portsAOI_filtered_0_valid_T_6 = 1'h1; // @[Xbar.scala:355:54]
wire _portsBIO_filtered_1_valid_T = 1'h1; // @[Xbar.scala:355:54]
wire _portsCOI_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54]
wire _portsCOI_filtered_0_valid_T_2 = 1'h1; // @[Xbar.scala:355:54]
wire _portsCOI_filtered_0_valid_T_4 = 1'h1; // @[Xbar.scala:355:54]
wire _portsCOI_filtered_0_valid_T_6 = 1'h1; // @[Xbar.scala:355:54]
wire portsDIO_filtered_2_ready = 1'h1; // @[Xbar.scala:352:24]
wire _portsEOI_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54]
wire _portsEOI_filtered_0_valid_T_2 = 1'h1; // @[Xbar.scala:355:54]
wire _portsEOI_filtered_0_valid_T_4 = 1'h1; // @[Xbar.scala:355:54]
wire _portsEOI_filtered_0_valid_T_6 = 1'h1; // @[Xbar.scala:355:54]
wire [8:0] beatsAI_2 = 9'h0; // @[Edges.scala:221:14]
wire [8:0] beatsBO_decode = 9'h0; // @[Edges.scala:220:59]
wire [8:0] beatsBO_0 = 9'h0; // @[Edges.scala:221:14]
wire [8:0] beatsCI_decode = 9'h0; // @[Edges.scala:220:59]
wire [8:0] beatsCI_0 = 9'h0; // @[Edges.scala:221:14]
wire [8:0] beatsCI_decode_1 = 9'h0; // @[Edges.scala:220:59]
wire [8:0] beatsCI_1 = 9'h0; // @[Edges.scala:221:14]
wire [8:0] beatsCI_decode_2 = 9'h0; // @[Edges.scala:220:59]
wire [8:0] beatsCI_2 = 9'h0; // @[Edges.scala:221:14]
wire [8:0] beatsCI_decode_3 = 9'h0; // @[Edges.scala:220:59]
wire [8:0] beatsCI_3 = 9'h0; // @[Edges.scala:221:14]
wire [8:0] maskedBeats_2 = 9'h0; // @[Arbiter.scala:82:69]
wire [3:0] _addressC_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _addressC_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _addressC_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _addressC_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _addressC_WIRE_4_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _addressC_WIRE_5_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _addressC_WIRE_6_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _addressC_WIRE_7_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _requestBOI_WIRE_bits_size = 4'h0; // @[Bundles.scala:264:74]
wire [3:0] _requestBOI_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:264:61]
wire [3:0] _requestBOI_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:264:74]
wire [3:0] _requestBOI_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:264:61]
wire [3:0] requestBOI_uncommonBits = 4'h0; // @[Parameters.scala:52:56]
wire [3:0] _requestBOI_WIRE_4_bits_size = 4'h0; // @[Bundles.scala:264:74]
wire [3:0] _requestBOI_WIRE_5_bits_size = 4'h0; // @[Bundles.scala:264:61]
wire [3:0] _requestBOI_WIRE_6_bits_size = 4'h0; // @[Bundles.scala:264:74]
wire [3:0] _requestBOI_WIRE_7_bits_size = 4'h0; // @[Bundles.scala:264:61]
wire [3:0] _requestEIO_WIRE_bits_sink = 4'h0; // @[Bundles.scala:267:74]
wire [3:0] _requestEIO_WIRE_1_bits_sink = 4'h0; // @[Bundles.scala:267:61]
wire [3:0] _requestEIO_uncommonBits_T = 4'h0; // @[Parameters.scala:52:29]
wire [3:0] requestEIO_uncommonBits = 4'h0; // @[Parameters.scala:52:56]
wire [3:0] _requestEIO_WIRE_2_bits_sink = 4'h0; // @[Bundles.scala:267:74]
wire [3:0] _requestEIO_WIRE_3_bits_sink = 4'h0; // @[Bundles.scala:267:61]
wire [3:0] _requestEIO_uncommonBits_T_1 = 4'h0; // @[Parameters.scala:52:29]
wire [3:0] requestEIO_uncommonBits_1 = 4'h0; // @[Parameters.scala:52:56]
wire [3:0] _requestEIO_WIRE_4_bits_sink = 4'h0; // @[Bundles.scala:267:74]
wire [3:0] _requestEIO_WIRE_5_bits_sink = 4'h0; // @[Bundles.scala:267:61]
wire [3:0] _requestEIO_uncommonBits_T_2 = 4'h0; // @[Parameters.scala:52:29]
wire [3:0] requestEIO_uncommonBits_2 = 4'h0; // @[Parameters.scala:52:56]
wire [3:0] _requestEIO_WIRE_6_bits_sink = 4'h0; // @[Bundles.scala:267:74]
wire [3:0] _requestEIO_WIRE_7_bits_sink = 4'h0; // @[Bundles.scala:267:61]
wire [3:0] _requestEIO_uncommonBits_T_3 = 4'h0; // @[Parameters.scala:52:29]
wire [3:0] requestEIO_uncommonBits_3 = 4'h0; // @[Parameters.scala:52:56]
wire [3:0] _beatsBO_WIRE_bits_size = 4'h0; // @[Bundles.scala:264:74]
wire [3:0] _beatsBO_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:264:61]
wire [3:0] _beatsCI_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _beatsCI_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _beatsCI_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _beatsCI_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _beatsCI_WIRE_4_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _beatsCI_WIRE_5_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _beatsCI_WIRE_6_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _beatsCI_WIRE_7_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _beatsEI_WIRE_bits_sink = 4'h0; // @[Bundles.scala:267:74]
wire [3:0] _beatsEI_WIRE_1_bits_sink = 4'h0; // @[Bundles.scala:267:61]
wire [3:0] _beatsEI_WIRE_2_bits_sink = 4'h0; // @[Bundles.scala:267:74]
wire [3:0] _beatsEI_WIRE_3_bits_sink = 4'h0; // @[Bundles.scala:267:61]
wire [3:0] _beatsEI_WIRE_4_bits_sink = 4'h0; // @[Bundles.scala:267:74]
wire [3:0] _beatsEI_WIRE_5_bits_sink = 4'h0; // @[Bundles.scala:267:61]
wire [3:0] _beatsEI_WIRE_6_bits_sink = 4'h0; // @[Bundles.scala:267:74]
wire [3:0] _beatsEI_WIRE_7_bits_sink = 4'h0; // @[Bundles.scala:267:61]
wire [3:0] _portsBIO_WIRE_bits_size = 4'h0; // @[Bundles.scala:264:74]
wire [3:0] _portsBIO_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:264:61]
wire [3:0] portsBIO_filtered_0_bits_size = 4'h0; // @[Xbar.scala:352:24]
wire [3:0] portsBIO_filtered_1_bits_size = 4'h0; // @[Xbar.scala:352:24]
wire [3:0] portsBIO_filtered_2_bits_size = 4'h0; // @[Xbar.scala:352:24]
wire [3:0] portsBIO_filtered_3_bits_size = 4'h0; // @[Xbar.scala:352:24]
wire [3:0] _portsCOI_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _portsCOI_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] portsCOI_filtered_0_bits_size = 4'h0; // @[Xbar.scala:352:24]
wire [3:0] _portsCOI_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _portsCOI_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] portsCOI_filtered_1_0_bits_size = 4'h0; // @[Xbar.scala:352:24]
wire [3:0] _portsCOI_WIRE_4_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _portsCOI_WIRE_5_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] portsCOI_filtered_2_0_bits_size = 4'h0; // @[Xbar.scala:352:24]
wire [3:0] _portsCOI_WIRE_6_bits_size = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _portsCOI_WIRE_7_bits_size = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] portsCOI_filtered_3_0_bits_size = 4'h0; // @[Xbar.scala:352:24]
wire [3:0] _portsEOI_WIRE_bits_sink = 4'h0; // @[Bundles.scala:267:74]
wire [3:0] _portsEOI_WIRE_1_bits_sink = 4'h0; // @[Bundles.scala:267:61]
wire [3:0] portsEOI_filtered_0_bits_sink = 4'h0; // @[Xbar.scala:352:24]
wire [3:0] _portsEOI_WIRE_2_bits_sink = 4'h0; // @[Bundles.scala:267:74]
wire [3:0] _portsEOI_WIRE_3_bits_sink = 4'h0; // @[Bundles.scala:267:61]
wire [3:0] portsEOI_filtered_1_0_bits_sink = 4'h0; // @[Xbar.scala:352:24]
wire [3:0] _portsEOI_WIRE_4_bits_sink = 4'h0; // @[Bundles.scala:267:74]
wire [3:0] _portsEOI_WIRE_5_bits_sink = 4'h0; // @[Bundles.scala:267:61]
wire [3:0] portsEOI_filtered_2_0_bits_sink = 4'h0; // @[Xbar.scala:352:24]
wire [3:0] _portsEOI_WIRE_6_bits_sink = 4'h0; // @[Bundles.scala:267:74]
wire [3:0] _portsEOI_WIRE_7_bits_sink = 4'h0; // @[Bundles.scala:267:61]
wire [3:0] portsEOI_filtered_3_0_bits_sink = 4'h0; // @[Xbar.scala:352:24]
wire [31:0] _addressC_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _addressC_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _addressC_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _addressC_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _addressC_WIRE_4_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _addressC_WIRE_5_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _addressC_WIRE_6_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _addressC_WIRE_7_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _requestCIO_T = 32'h0; // @[Parameters.scala:137:31]
wire [31:0] _requestCIO_T_5 = 32'h0; // @[Parameters.scala:137:31]
wire [31:0] _requestCIO_T_10 = 32'h0; // @[Parameters.scala:137:31]
wire [31:0] _requestCIO_T_15 = 32'h0; // @[Parameters.scala:137:31]
wire [31:0] _requestBOI_WIRE_bits_address = 32'h0; // @[Bundles.scala:264:74]
wire [31:0] _requestBOI_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:264:61]
wire [31:0] _requestBOI_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:264:74]
wire [31:0] _requestBOI_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:264:61]
wire [31:0] _requestBOI_WIRE_4_bits_address = 32'h0; // @[Bundles.scala:264:74]
wire [31:0] _requestBOI_WIRE_5_bits_address = 32'h0; // @[Bundles.scala:264:61]
wire [31:0] _requestBOI_WIRE_6_bits_address = 32'h0; // @[Bundles.scala:264:74]
wire [31:0] _requestBOI_WIRE_7_bits_address = 32'h0; // @[Bundles.scala:264:61]
wire [31:0] _beatsBO_WIRE_bits_address = 32'h0; // @[Bundles.scala:264:74]
wire [31:0] _beatsBO_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:264:61]
wire [31:0] _beatsCI_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _beatsCI_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _beatsCI_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _beatsCI_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _beatsCI_WIRE_4_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _beatsCI_WIRE_5_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _beatsCI_WIRE_6_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _beatsCI_WIRE_7_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _portsBIO_WIRE_bits_address = 32'h0; // @[Bundles.scala:264:74]
wire [31:0] _portsBIO_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:264:61]
wire [31:0] portsBIO_filtered_0_bits_address = 32'h0; // @[Xbar.scala:352:24]
wire [31:0] portsBIO_filtered_1_bits_address = 32'h0; // @[Xbar.scala:352:24]
wire [31:0] portsBIO_filtered_2_bits_address = 32'h0; // @[Xbar.scala:352:24]
wire [31:0] portsBIO_filtered_3_bits_address = 32'h0; // @[Xbar.scala:352:24]
wire [31:0] _portsCOI_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _portsCOI_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] portsCOI_filtered_0_bits_address = 32'h0; // @[Xbar.scala:352:24]
wire [31:0] _portsCOI_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _portsCOI_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] portsCOI_filtered_1_0_bits_address = 32'h0; // @[Xbar.scala:352:24]
wire [31:0] _portsCOI_WIRE_4_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _portsCOI_WIRE_5_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] portsCOI_filtered_2_0_bits_address = 32'h0; // @[Xbar.scala:352:24]
wire [31:0] _portsCOI_WIRE_6_bits_address = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _portsCOI_WIRE_7_bits_address = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] portsCOI_filtered_3_0_bits_address = 32'h0; // @[Xbar.scala:352:24]
wire [5:0] _addressC_WIRE_bits_source = 6'h0; // @[Bundles.scala:265:74]
wire [5:0] _addressC_WIRE_1_bits_source = 6'h0; // @[Bundles.scala:265:61]
wire [5:0] _addressC_WIRE_2_bits_source = 6'h0; // @[Bundles.scala:265:74]
wire [5:0] _addressC_WIRE_3_bits_source = 6'h0; // @[Bundles.scala:265:61]
wire [5:0] _addressC_WIRE_4_bits_source = 6'h0; // @[Bundles.scala:265:74]
wire [5:0] _addressC_WIRE_5_bits_source = 6'h0; // @[Bundles.scala:265:61]
wire [5:0] _addressC_WIRE_6_bits_source = 6'h0; // @[Bundles.scala:265:74]
wire [5:0] _addressC_WIRE_7_bits_source = 6'h0; // @[Bundles.scala:265:61]
wire [5:0] _requestBOI_WIRE_bits_source = 6'h0; // @[Bundles.scala:264:74]
wire [5:0] _requestBOI_WIRE_1_bits_source = 6'h0; // @[Bundles.scala:264:61]
wire [5:0] _requestBOI_WIRE_2_bits_source = 6'h0; // @[Bundles.scala:264:74]
wire [5:0] _requestBOI_WIRE_3_bits_source = 6'h0; // @[Bundles.scala:264:61]
wire [5:0] _requestBOI_uncommonBits_T = 6'h0; // @[Parameters.scala:52:29]
wire [5:0] _requestBOI_WIRE_4_bits_source = 6'h0; // @[Bundles.scala:264:74]
wire [5:0] _requestBOI_WIRE_5_bits_source = 6'h0; // @[Bundles.scala:264:61]
wire [5:0] _requestBOI_uncommonBits_T_1 = 6'h0; // @[Parameters.scala:52:29]
wire [5:0] _requestBOI_WIRE_6_bits_source = 6'h0; // @[Bundles.scala:264:74]
wire [5:0] _requestBOI_WIRE_7_bits_source = 6'h0; // @[Bundles.scala:264:61]
wire [5:0] _requestBOI_uncommonBits_T_2 = 6'h0; // @[Parameters.scala:52:29]
wire [5:0] _beatsBO_WIRE_bits_source = 6'h0; // @[Bundles.scala:264:74]
wire [5:0] _beatsBO_WIRE_1_bits_source = 6'h0; // @[Bundles.scala:264:61]
wire [5:0] _beatsCI_WIRE_bits_source = 6'h0; // @[Bundles.scala:265:74]
wire [5:0] _beatsCI_WIRE_1_bits_source = 6'h0; // @[Bundles.scala:265:61]
wire [5:0] _beatsCI_WIRE_2_bits_source = 6'h0; // @[Bundles.scala:265:74]
wire [5:0] _beatsCI_WIRE_3_bits_source = 6'h0; // @[Bundles.scala:265:61]
wire [5:0] _beatsCI_WIRE_4_bits_source = 6'h0; // @[Bundles.scala:265:74]
wire [5:0] _beatsCI_WIRE_5_bits_source = 6'h0; // @[Bundles.scala:265:61]
wire [5:0] _beatsCI_WIRE_6_bits_source = 6'h0; // @[Bundles.scala:265:74]
wire [5:0] _beatsCI_WIRE_7_bits_source = 6'h0; // @[Bundles.scala:265:61]
wire [5:0] _portsBIO_WIRE_bits_source = 6'h0; // @[Bundles.scala:264:74]
wire [5:0] _portsBIO_WIRE_1_bits_source = 6'h0; // @[Bundles.scala:264:61]
wire [5:0] portsBIO_filtered_0_bits_source = 6'h0; // @[Xbar.scala:352:24]
wire [5:0] portsBIO_filtered_1_bits_source = 6'h0; // @[Xbar.scala:352:24]
wire [5:0] portsBIO_filtered_2_bits_source = 6'h0; // @[Xbar.scala:352:24]
wire [5:0] portsBIO_filtered_3_bits_source = 6'h0; // @[Xbar.scala:352:24]
wire [5:0] _portsCOI_WIRE_bits_source = 6'h0; // @[Bundles.scala:265:74]
wire [5:0] _portsCOI_WIRE_1_bits_source = 6'h0; // @[Bundles.scala:265:61]
wire [5:0] portsCOI_filtered_0_bits_source = 6'h0; // @[Xbar.scala:352:24]
wire [5:0] _portsCOI_WIRE_2_bits_source = 6'h0; // @[Bundles.scala:265:74]
wire [5:0] _portsCOI_WIRE_3_bits_source = 6'h0; // @[Bundles.scala:265:61]
wire [5:0] portsCOI_filtered_1_0_bits_source = 6'h0; // @[Xbar.scala:352:24]
wire [5:0] _portsCOI_WIRE_4_bits_source = 6'h0; // @[Bundles.scala:265:74]
wire [5:0] _portsCOI_WIRE_5_bits_source = 6'h0; // @[Bundles.scala:265:61]
wire [5:0] portsCOI_filtered_2_0_bits_source = 6'h0; // @[Xbar.scala:352:24]
wire [5:0] _portsCOI_WIRE_6_bits_source = 6'h0; // @[Bundles.scala:265:74]
wire [5:0] _portsCOI_WIRE_7_bits_source = 6'h0; // @[Bundles.scala:265:61]
wire [5:0] portsCOI_filtered_3_0_bits_source = 6'h0; // @[Xbar.scala:352:24]
wire [7:0] _requestBOI_WIRE_bits_mask = 8'h0; // @[Bundles.scala:264:74]
wire [7:0] _requestBOI_WIRE_1_bits_mask = 8'h0; // @[Bundles.scala:264:61]
wire [7:0] _requestBOI_WIRE_2_bits_mask = 8'h0; // @[Bundles.scala:264:74]
wire [7:0] _requestBOI_WIRE_3_bits_mask = 8'h0; // @[Bundles.scala:264:61]
wire [7:0] _requestBOI_WIRE_4_bits_mask = 8'h0; // @[Bundles.scala:264:74]
wire [7:0] _requestBOI_WIRE_5_bits_mask = 8'h0; // @[Bundles.scala:264:61]
wire [7:0] _requestBOI_WIRE_6_bits_mask = 8'h0; // @[Bundles.scala:264:74]
wire [7:0] _requestBOI_WIRE_7_bits_mask = 8'h0; // @[Bundles.scala:264:61]
wire [7:0] _beatsBO_WIRE_bits_mask = 8'h0; // @[Bundles.scala:264:74]
wire [7:0] _beatsBO_WIRE_1_bits_mask = 8'h0; // @[Bundles.scala:264:61]
wire [7:0] _portsBIO_WIRE_bits_mask = 8'h0; // @[Bundles.scala:264:74]
wire [7:0] _portsBIO_WIRE_1_bits_mask = 8'h0; // @[Bundles.scala:264:61]
wire [7:0] portsBIO_filtered_0_bits_mask = 8'h0; // @[Xbar.scala:352:24]
wire [7:0] portsBIO_filtered_1_bits_mask = 8'h0; // @[Xbar.scala:352:24]
wire [7:0] portsBIO_filtered_2_bits_mask = 8'h0; // @[Xbar.scala:352:24]
wire [7:0] portsBIO_filtered_3_bits_mask = 8'h0; // @[Xbar.scala:352:24]
wire [1:0] _requestBOI_WIRE_bits_param = 2'h0; // @[Bundles.scala:264:74]
wire [1:0] _requestBOI_WIRE_1_bits_param = 2'h0; // @[Bundles.scala:264:61]
wire [1:0] _requestBOI_WIRE_2_bits_param = 2'h0; // @[Bundles.scala:264:74]
wire [1:0] _requestBOI_WIRE_3_bits_param = 2'h0; // @[Bundles.scala:264:61]
wire [1:0] _requestBOI_T = 2'h0; // @[Parameters.scala:54:10]
wire [1:0] _requestBOI_WIRE_4_bits_param = 2'h0; // @[Bundles.scala:264:74]
wire [1:0] _requestBOI_WIRE_5_bits_param = 2'h0; // @[Bundles.scala:264:61]
wire [1:0] _requestBOI_WIRE_6_bits_param = 2'h0; // @[Bundles.scala:264:74]
wire [1:0] _requestBOI_WIRE_7_bits_param = 2'h0; // @[Bundles.scala:264:61]
wire [1:0] _beatsBO_WIRE_bits_param = 2'h0; // @[Bundles.scala:264:74]
wire [1:0] _beatsBO_WIRE_1_bits_param = 2'h0; // @[Bundles.scala:264:61]
wire [1:0] _portsBIO_WIRE_bits_param = 2'h0; // @[Bundles.scala:264:74]
wire [1:0] _portsBIO_WIRE_1_bits_param = 2'h0; // @[Bundles.scala:264:61]
wire [1:0] portsBIO_filtered_0_bits_param = 2'h0; // @[Xbar.scala:352:24]
wire [1:0] portsBIO_filtered_1_bits_param = 2'h0; // @[Xbar.scala:352:24]
wire [1:0] portsBIO_filtered_2_bits_param = 2'h0; // @[Xbar.scala:352:24]
wire [1:0] portsBIO_filtered_3_bits_param = 2'h0; // @[Xbar.scala:352:24]
wire [5:0] in_0_a_bits_source = 6'h20; // @[Xbar.scala:159:18]
wire [5:0] _in_0_a_bits_source_T = 6'h20; // @[Xbar.scala:166:55]
wire [5:0] portsAOI_filtered_0_bits_source = 6'h20; // @[Xbar.scala:352:24]
wire [11:0] _beatsBO_decode_T_2 = 12'h0; // @[package.scala:243:46]
wire [11:0] _beatsCI_decode_T_2 = 12'h0; // @[package.scala:243:46]
wire [11:0] _beatsCI_decode_T_5 = 12'h0; // @[package.scala:243:46]
wire [11:0] _beatsCI_decode_T_8 = 12'h0; // @[package.scala:243:46]
wire [11:0] _beatsCI_decode_T_11 = 12'h0; // @[package.scala:243:46]
wire [11:0] _beatsBO_decode_T_1 = 12'hFFF; // @[package.scala:243:76]
wire [11:0] _beatsCI_decode_T_1 = 12'hFFF; // @[package.scala:243:76]
wire [11:0] _beatsCI_decode_T_4 = 12'hFFF; // @[package.scala:243:76]
wire [11:0] _beatsCI_decode_T_7 = 12'hFFF; // @[package.scala:243:76]
wire [11:0] _beatsCI_decode_T_10 = 12'hFFF; // @[package.scala:243:76]
wire [26:0] _beatsBO_decode_T = 27'hFFF; // @[package.scala:243:71]
wire [26:0] _beatsCI_decode_T = 27'hFFF; // @[package.scala:243:71]
wire [26:0] _beatsCI_decode_T_3 = 27'hFFF; // @[package.scala:243:71]
wire [26:0] _beatsCI_decode_T_6 = 27'hFFF; // @[package.scala:243:71]
wire [26:0] _beatsCI_decode_T_9 = 27'hFFF; // @[package.scala:243:71]
wire [32:0] _requestAIO_T_2 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] _requestAIO_T_3 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] _requestAIO_T_7 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] _requestAIO_T_8 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] _requestAIO_T_12 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] _requestAIO_T_13 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] _requestAIO_T_17 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] _requestAIO_T_18 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] _requestCIO_T_1 = 33'h0; // @[Parameters.scala:137:41]
wire [32:0] _requestCIO_T_2 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] _requestCIO_T_3 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] _requestCIO_T_6 = 33'h0; // @[Parameters.scala:137:41]
wire [32:0] _requestCIO_T_7 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] _requestCIO_T_8 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] _requestCIO_T_11 = 33'h0; // @[Parameters.scala:137:41]
wire [32:0] _requestCIO_T_12 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] _requestCIO_T_13 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] _requestCIO_T_16 = 33'h0; // @[Parameters.scala:137:41]
wire [32:0] _requestCIO_T_17 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] _requestCIO_T_18 = 33'h0; // @[Parameters.scala:137:46]
wire anonIn_3_a_ready; // @[MixedNode.scala:551:17]
wire anonIn_3_a_valid = auto_anon_in_3_a_valid_0; // @[Xbar.scala:74:9]
wire [2:0] anonIn_3_a_bits_opcode = auto_anon_in_3_a_bits_opcode_0; // @[Xbar.scala:74:9]
wire [3:0] anonIn_3_a_bits_size = auto_anon_in_3_a_bits_size_0; // @[Xbar.scala:74:9]
wire [2:0] anonIn_3_a_bits_source = auto_anon_in_3_a_bits_source_0; // @[Xbar.scala:74:9]
wire [31:0] anonIn_3_a_bits_address = auto_anon_in_3_a_bits_address_0; // @[Xbar.scala:74:9]
wire [7:0] anonIn_3_a_bits_mask = auto_anon_in_3_a_bits_mask_0; // @[Xbar.scala:74:9]
wire [63:0] anonIn_3_a_bits_data = auto_anon_in_3_a_bits_data_0; // @[Xbar.scala:74:9]
wire anonIn_3_d_ready = auto_anon_in_3_d_ready_0; // @[Xbar.scala:74:9]
wire anonIn_3_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] anonIn_3_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] anonIn_3_d_bits_param; // @[MixedNode.scala:551:17]
wire [3:0] anonIn_3_d_bits_size; // @[MixedNode.scala:551:17]
wire [2:0] anonIn_3_d_bits_source; // @[MixedNode.scala:551:17]
wire [3:0] anonIn_3_d_bits_sink; // @[MixedNode.scala:551:17]
wire anonIn_3_d_bits_denied; // @[MixedNode.scala:551:17]
wire [63:0] anonIn_3_d_bits_data; // @[MixedNode.scala:551:17]
wire anonIn_3_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire anonIn_2_a_ready; // @[MixedNode.scala:551:17]
wire anonIn_2_a_valid = auto_anon_in_2_a_valid_0; // @[Xbar.scala:74:9]
wire [3:0] anonIn_2_a_bits_size = auto_anon_in_2_a_bits_size_0; // @[Xbar.scala:74:9]
wire [2:0] anonIn_2_a_bits_source = auto_anon_in_2_a_bits_source_0; // @[Xbar.scala:74:9]
wire [31:0] anonIn_2_a_bits_address = auto_anon_in_2_a_bits_address_0; // @[Xbar.scala:74:9]
wire [7:0] anonIn_2_a_bits_mask = auto_anon_in_2_a_bits_mask_0; // @[Xbar.scala:74:9]
wire anonIn_2_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] anonIn_2_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] anonIn_2_d_bits_param; // @[MixedNode.scala:551:17]
wire [3:0] anonIn_2_d_bits_size; // @[MixedNode.scala:551:17]
wire [2:0] anonIn_2_d_bits_source; // @[MixedNode.scala:551:17]
wire [3:0] anonIn_2_d_bits_sink; // @[MixedNode.scala:551:17]
wire anonIn_2_d_bits_denied; // @[MixedNode.scala:551:17]
wire [63:0] anonIn_2_d_bits_data; // @[MixedNode.scala:551:17]
wire anonIn_2_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire anonIn_1_a_ready; // @[MixedNode.scala:551:17]
wire anonIn_1_a_valid = auto_anon_in_1_a_valid_0; // @[Xbar.scala:74:9]
wire [2:0] anonIn_1_a_bits_opcode = auto_anon_in_1_a_bits_opcode_0; // @[Xbar.scala:74:9]
wire [2:0] anonIn_1_a_bits_param = auto_anon_in_1_a_bits_param_0; // @[Xbar.scala:74:9]
wire [3:0] anonIn_1_a_bits_size = auto_anon_in_1_a_bits_size_0; // @[Xbar.scala:74:9]
wire [3:0] anonIn_1_a_bits_source = auto_anon_in_1_a_bits_source_0; // @[Xbar.scala:74:9]
wire [31:0] anonIn_1_a_bits_address = auto_anon_in_1_a_bits_address_0; // @[Xbar.scala:74:9]
wire [7:0] anonIn_1_a_bits_mask = auto_anon_in_1_a_bits_mask_0; // @[Xbar.scala:74:9]
wire [63:0] anonIn_1_a_bits_data = auto_anon_in_1_a_bits_data_0; // @[Xbar.scala:74:9]
wire anonIn_1_a_bits_corrupt = auto_anon_in_1_a_bits_corrupt_0; // @[Xbar.scala:74:9]
wire anonIn_1_d_ready = auto_anon_in_1_d_ready_0; // @[Xbar.scala:74:9]
wire anonIn_1_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] anonIn_1_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] anonIn_1_d_bits_param; // @[MixedNode.scala:551:17]
wire [3:0] anonIn_1_d_bits_size; // @[MixedNode.scala:551:17]
wire [3:0] anonIn_1_d_bits_source; // @[MixedNode.scala:551:17]
wire [3:0] anonIn_1_d_bits_sink; // @[MixedNode.scala:551:17]
wire anonIn_1_d_bits_denied; // @[MixedNode.scala:551:17]
wire [63:0] anonIn_1_d_bits_data; // @[MixedNode.scala:551:17]
wire anonIn_1_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire anonIn_a_ready; // @[MixedNode.scala:551:17]
wire anonIn_a_valid = auto_anon_in_0_a_valid_0; // @[Xbar.scala:74:9]
wire [2:0] anonIn_a_bits_opcode = auto_anon_in_0_a_bits_opcode_0; // @[Xbar.scala:74:9]
wire [3:0] anonIn_a_bits_size = auto_anon_in_0_a_bits_size_0; // @[Xbar.scala:74:9]
wire [31:0] anonIn_a_bits_address = auto_anon_in_0_a_bits_address_0; // @[Xbar.scala:74:9]
wire [7:0] anonIn_a_bits_mask = auto_anon_in_0_a_bits_mask_0; // @[Xbar.scala:74:9]
wire [63:0] anonIn_a_bits_data = auto_anon_in_0_a_bits_data_0; // @[Xbar.scala:74:9]
wire anonIn_a_bits_corrupt = auto_anon_in_0_a_bits_corrupt_0; // @[Xbar.scala:74:9]
wire anonIn_d_ready = auto_anon_in_0_d_ready_0; // @[Xbar.scala:74:9]
wire anonIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] anonIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] anonIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [3:0] anonIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [3:0] anonIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire anonIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire [63:0] anonIn_d_bits_data; // @[MixedNode.scala:551:17]
wire anonIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire anonOut_a_ready = auto_anon_out_a_ready_0; // @[Xbar.scala:74:9]
wire anonOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] anonOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] anonOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [3:0] anonOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [5:0] anonOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] anonOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [7:0] anonOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] anonOut_a_bits_data; // @[MixedNode.scala:542:17]
wire anonOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire anonOut_d_ready; // @[MixedNode.scala:542:17]
wire anonOut_d_valid = auto_anon_out_d_valid_0; // @[Xbar.scala:74:9]
wire [2:0] anonOut_d_bits_opcode = auto_anon_out_d_bits_opcode_0; // @[Xbar.scala:74:9]
wire [1:0] anonOut_d_bits_param = auto_anon_out_d_bits_param_0; // @[Xbar.scala:74:9]
wire [3:0] anonOut_d_bits_size = auto_anon_out_d_bits_size_0; // @[Xbar.scala:74:9]
wire [5:0] anonOut_d_bits_source = auto_anon_out_d_bits_source_0; // @[Xbar.scala:74:9]
wire [3:0] anonOut_d_bits_sink = auto_anon_out_d_bits_sink_0; // @[Xbar.scala:74:9]
wire anonOut_d_bits_denied = auto_anon_out_d_bits_denied_0; // @[Xbar.scala:74:9]
wire [63:0] anonOut_d_bits_data = auto_anon_out_d_bits_data_0; // @[Xbar.scala:74:9]
wire anonOut_d_bits_corrupt = auto_anon_out_d_bits_corrupt_0; // @[Xbar.scala:74:9]
wire auto_anon_in_3_a_ready_0; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_in_3_d_bits_opcode_0; // @[Xbar.scala:74:9]
wire [1:0] auto_anon_in_3_d_bits_param_0; // @[Xbar.scala:74:9]
wire [3:0] auto_anon_in_3_d_bits_size_0; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_in_3_d_bits_source_0; // @[Xbar.scala:74:9]
wire [3:0] auto_anon_in_3_d_bits_sink_0; // @[Xbar.scala:74:9]
wire auto_anon_in_3_d_bits_denied_0; // @[Xbar.scala:74:9]
wire [63:0] auto_anon_in_3_d_bits_data_0; // @[Xbar.scala:74:9]
wire auto_anon_in_3_d_bits_corrupt_0; // @[Xbar.scala:74:9]
wire auto_anon_in_3_d_valid_0; // @[Xbar.scala:74:9]
wire auto_anon_in_2_a_ready_0; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_in_2_d_bits_opcode_0; // @[Xbar.scala:74:9]
wire [1:0] auto_anon_in_2_d_bits_param_0; // @[Xbar.scala:74:9]
wire [3:0] auto_anon_in_2_d_bits_size_0; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_in_2_d_bits_source_0; // @[Xbar.scala:74:9]
wire [3:0] auto_anon_in_2_d_bits_sink_0; // @[Xbar.scala:74:9]
wire auto_anon_in_2_d_bits_denied_0; // @[Xbar.scala:74:9]
wire [63:0] auto_anon_in_2_d_bits_data_0; // @[Xbar.scala:74:9]
wire auto_anon_in_2_d_bits_corrupt_0; // @[Xbar.scala:74:9]
wire auto_anon_in_2_d_valid_0; // @[Xbar.scala:74:9]
wire auto_anon_in_1_a_ready_0; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_in_1_d_bits_opcode_0; // @[Xbar.scala:74:9]
wire [1:0] auto_anon_in_1_d_bits_param_0; // @[Xbar.scala:74:9]
wire [3:0] auto_anon_in_1_d_bits_size_0; // @[Xbar.scala:74:9]
wire [3:0] auto_anon_in_1_d_bits_source_0; // @[Xbar.scala:74:9]
wire [3:0] auto_anon_in_1_d_bits_sink_0; // @[Xbar.scala:74:9]
wire auto_anon_in_1_d_bits_denied_0; // @[Xbar.scala:74:9]
wire [63:0] auto_anon_in_1_d_bits_data_0; // @[Xbar.scala:74:9]
wire auto_anon_in_1_d_bits_corrupt_0; // @[Xbar.scala:74:9]
wire auto_anon_in_1_d_valid_0; // @[Xbar.scala:74:9]
wire auto_anon_in_0_a_ready_0; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_in_0_d_bits_opcode_0; // @[Xbar.scala:74:9]
wire [1:0] auto_anon_in_0_d_bits_param_0; // @[Xbar.scala:74:9]
wire [3:0] auto_anon_in_0_d_bits_size_0; // @[Xbar.scala:74:9]
wire [3:0] auto_anon_in_0_d_bits_sink_0; // @[Xbar.scala:74:9]
wire auto_anon_in_0_d_bits_denied_0; // @[Xbar.scala:74:9]
wire [63:0] auto_anon_in_0_d_bits_data_0; // @[Xbar.scala:74:9]
wire auto_anon_in_0_d_bits_corrupt_0; // @[Xbar.scala:74:9]
wire auto_anon_in_0_d_valid_0; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_out_a_bits_opcode_0; // @[Xbar.scala:74:9]
wire [2:0] auto_anon_out_a_bits_param_0; // @[Xbar.scala:74:9]
wire [3:0] auto_anon_out_a_bits_size_0; // @[Xbar.scala:74:9]
wire [5:0] auto_anon_out_a_bits_source_0; // @[Xbar.scala:74:9]
wire [31:0] auto_anon_out_a_bits_address_0; // @[Xbar.scala:74:9]
wire [7:0] auto_anon_out_a_bits_mask_0; // @[Xbar.scala:74:9]
wire [63:0] auto_anon_out_a_bits_data_0; // @[Xbar.scala:74:9]
wire auto_anon_out_a_bits_corrupt_0; // @[Xbar.scala:74:9]
wire auto_anon_out_a_valid_0; // @[Xbar.scala:74:9]
wire auto_anon_out_d_ready_0; // @[Xbar.scala:74:9]
wire in_0_a_ready; // @[Xbar.scala:159:18]
assign auto_anon_in_0_a_ready_0 = anonIn_a_ready; // @[Xbar.scala:74:9]
wire in_0_a_valid = anonIn_a_valid; // @[Xbar.scala:159:18]
wire [2:0] in_0_a_bits_opcode = anonIn_a_bits_opcode; // @[Xbar.scala:159:18]
wire [3:0] in_0_a_bits_size = anonIn_a_bits_size; // @[Xbar.scala:159:18]
wire [31:0] in_0_a_bits_address = anonIn_a_bits_address; // @[Xbar.scala:159:18]
wire [7:0] in_0_a_bits_mask = anonIn_a_bits_mask; // @[Xbar.scala:159:18]
wire [63:0] in_0_a_bits_data = anonIn_a_bits_data; // @[Xbar.scala:159:18]
wire in_0_a_bits_corrupt = anonIn_a_bits_corrupt; // @[Xbar.scala:159:18]
wire in_0_d_ready = anonIn_d_ready; // @[Xbar.scala:159:18]
wire in_0_d_valid; // @[Xbar.scala:159:18]
assign auto_anon_in_0_d_valid_0 = anonIn_d_valid; // @[Xbar.scala:74:9]
wire [2:0] in_0_d_bits_opcode; // @[Xbar.scala:159:18]
assign auto_anon_in_0_d_bits_opcode_0 = anonIn_d_bits_opcode; // @[Xbar.scala:74:9]
wire [1:0] in_0_d_bits_param; // @[Xbar.scala:159:18]
assign auto_anon_in_0_d_bits_param_0 = anonIn_d_bits_param; // @[Xbar.scala:74:9]
wire [3:0] in_0_d_bits_size; // @[Xbar.scala:159:18]
assign auto_anon_in_0_d_bits_size_0 = anonIn_d_bits_size; // @[Xbar.scala:74:9]
wire [3:0] in_0_d_bits_sink; // @[Xbar.scala:159:18]
assign auto_anon_in_0_d_bits_sink_0 = anonIn_d_bits_sink; // @[Xbar.scala:74:9]
wire in_0_d_bits_denied; // @[Xbar.scala:159:18]
assign auto_anon_in_0_d_bits_denied_0 = anonIn_d_bits_denied; // @[Xbar.scala:74:9]
wire [63:0] in_0_d_bits_data; // @[Xbar.scala:159:18]
assign auto_anon_in_0_d_bits_data_0 = anonIn_d_bits_data; // @[Xbar.scala:74:9]
wire in_0_d_bits_corrupt; // @[Xbar.scala:159:18]
assign auto_anon_in_0_d_bits_corrupt_0 = anonIn_d_bits_corrupt; // @[Xbar.scala:74:9]
wire in_1_a_ready; // @[Xbar.scala:159:18]
assign auto_anon_in_1_a_ready_0 = anonIn_1_a_ready; // @[Xbar.scala:74:9]
wire in_1_a_valid = anonIn_1_a_valid; // @[Xbar.scala:159:18]
wire [2:0] in_1_a_bits_opcode = anonIn_1_a_bits_opcode; // @[Xbar.scala:159:18]
wire [2:0] in_1_a_bits_param = anonIn_1_a_bits_param; // @[Xbar.scala:159:18]
wire [3:0] in_1_a_bits_size = anonIn_1_a_bits_size; // @[Xbar.scala:159:18]
wire [3:0] _in_1_a_bits_source_T = anonIn_1_a_bits_source; // @[Xbar.scala:166:55]
wire [31:0] in_1_a_bits_address = anonIn_1_a_bits_address; // @[Xbar.scala:159:18]
wire [7:0] in_1_a_bits_mask = anonIn_1_a_bits_mask; // @[Xbar.scala:159:18]
wire [63:0] in_1_a_bits_data = anonIn_1_a_bits_data; // @[Xbar.scala:159:18]
wire in_1_a_bits_corrupt = anonIn_1_a_bits_corrupt; // @[Xbar.scala:159:18]
wire in_1_d_ready = anonIn_1_d_ready; // @[Xbar.scala:159:18]
wire in_1_d_valid; // @[Xbar.scala:159:18]
assign auto_anon_in_1_d_valid_0 = anonIn_1_d_valid; // @[Xbar.scala:74:9]
wire [2:0] in_1_d_bits_opcode; // @[Xbar.scala:159:18]
assign auto_anon_in_1_d_bits_opcode_0 = anonIn_1_d_bits_opcode; // @[Xbar.scala:74:9]
wire [1:0] in_1_d_bits_param; // @[Xbar.scala:159:18]
assign auto_anon_in_1_d_bits_param_0 = anonIn_1_d_bits_param; // @[Xbar.scala:74:9]
wire [3:0] in_1_d_bits_size; // @[Xbar.scala:159:18]
assign auto_anon_in_1_d_bits_size_0 = anonIn_1_d_bits_size; // @[Xbar.scala:74:9]
wire [3:0] _anonIn_d_bits_source_T; // @[Xbar.scala:156:69]
assign auto_anon_in_1_d_bits_source_0 = anonIn_1_d_bits_source; // @[Xbar.scala:74:9]
wire [3:0] in_1_d_bits_sink; // @[Xbar.scala:159:18]
assign auto_anon_in_1_d_bits_sink_0 = anonIn_1_d_bits_sink; // @[Xbar.scala:74:9]
wire in_1_d_bits_denied; // @[Xbar.scala:159:18]
assign auto_anon_in_1_d_bits_denied_0 = anonIn_1_d_bits_denied; // @[Xbar.scala:74:9]
wire [63:0] in_1_d_bits_data; // @[Xbar.scala:159:18]
assign auto_anon_in_1_d_bits_data_0 = anonIn_1_d_bits_data; // @[Xbar.scala:74:9]
wire in_1_d_bits_corrupt; // @[Xbar.scala:159:18]
assign auto_anon_in_1_d_bits_corrupt_0 = anonIn_1_d_bits_corrupt; // @[Xbar.scala:74:9]
wire in_2_a_ready; // @[Xbar.scala:159:18]
assign auto_anon_in_2_a_ready_0 = anonIn_2_a_ready; // @[Xbar.scala:74:9]
wire in_2_a_valid = anonIn_2_a_valid; // @[Xbar.scala:159:18]
wire [3:0] in_2_a_bits_size = anonIn_2_a_bits_size; // @[Xbar.scala:159:18]
wire [31:0] in_2_a_bits_address = anonIn_2_a_bits_address; // @[Xbar.scala:159:18]
wire [7:0] in_2_a_bits_mask = anonIn_2_a_bits_mask; // @[Xbar.scala:159:18]
wire in_2_d_valid; // @[Xbar.scala:159:18]
assign auto_anon_in_2_d_valid_0 = anonIn_2_d_valid; // @[Xbar.scala:74:9]
wire [2:0] in_2_d_bits_opcode; // @[Xbar.scala:159:18]
assign auto_anon_in_2_d_bits_opcode_0 = anonIn_2_d_bits_opcode; // @[Xbar.scala:74:9]
wire [1:0] in_2_d_bits_param; // @[Xbar.scala:159:18]
assign auto_anon_in_2_d_bits_param_0 = anonIn_2_d_bits_param; // @[Xbar.scala:74:9]
wire [3:0] in_2_d_bits_size; // @[Xbar.scala:159:18]
assign auto_anon_in_2_d_bits_size_0 = anonIn_2_d_bits_size; // @[Xbar.scala:74:9]
wire [2:0] _anonIn_d_bits_source_T_1; // @[Xbar.scala:156:69]
assign auto_anon_in_2_d_bits_source_0 = anonIn_2_d_bits_source; // @[Xbar.scala:74:9]
wire [3:0] in_2_d_bits_sink; // @[Xbar.scala:159:18]
assign auto_anon_in_2_d_bits_sink_0 = anonIn_2_d_bits_sink; // @[Xbar.scala:74:9]
wire in_2_d_bits_denied; // @[Xbar.scala:159:18]
assign auto_anon_in_2_d_bits_denied_0 = anonIn_2_d_bits_denied; // @[Xbar.scala:74:9]
wire [63:0] in_2_d_bits_data; // @[Xbar.scala:159:18]
assign auto_anon_in_2_d_bits_data_0 = anonIn_2_d_bits_data; // @[Xbar.scala:74:9]
wire in_2_d_bits_corrupt; // @[Xbar.scala:159:18]
assign auto_anon_in_2_d_bits_corrupt_0 = anonIn_2_d_bits_corrupt; // @[Xbar.scala:74:9]
wire in_3_a_ready; // @[Xbar.scala:159:18]
assign auto_anon_in_3_a_ready_0 = anonIn_3_a_ready; // @[Xbar.scala:74:9]
wire in_3_a_valid = anonIn_3_a_valid; // @[Xbar.scala:159:18]
wire [2:0] in_3_a_bits_opcode = anonIn_3_a_bits_opcode; // @[Xbar.scala:159:18]
wire [3:0] in_3_a_bits_size = anonIn_3_a_bits_size; // @[Xbar.scala:159:18]
wire [31:0] in_3_a_bits_address = anonIn_3_a_bits_address; // @[Xbar.scala:159:18]
wire [7:0] in_3_a_bits_mask = anonIn_3_a_bits_mask; // @[Xbar.scala:159:18]
wire [63:0] in_3_a_bits_data = anonIn_3_a_bits_data; // @[Xbar.scala:159:18]
wire in_3_d_ready = anonIn_3_d_ready; // @[Xbar.scala:159:18]
wire in_3_d_valid; // @[Xbar.scala:159:18]
assign auto_anon_in_3_d_valid_0 = anonIn_3_d_valid; // @[Xbar.scala:74:9]
wire [2:0] in_3_d_bits_opcode; // @[Xbar.scala:159:18]
assign auto_anon_in_3_d_bits_opcode_0 = anonIn_3_d_bits_opcode; // @[Xbar.scala:74:9]
wire [1:0] in_3_d_bits_param; // @[Xbar.scala:159:18]
assign auto_anon_in_3_d_bits_param_0 = anonIn_3_d_bits_param; // @[Xbar.scala:74:9]
wire [3:0] in_3_d_bits_size; // @[Xbar.scala:159:18]
assign auto_anon_in_3_d_bits_size_0 = anonIn_3_d_bits_size; // @[Xbar.scala:74:9]
wire [2:0] _anonIn_d_bits_source_T_2; // @[Xbar.scala:156:69]
assign auto_anon_in_3_d_bits_source_0 = anonIn_3_d_bits_source; // @[Xbar.scala:74:9]
wire [3:0] in_3_d_bits_sink; // @[Xbar.scala:159:18]
assign auto_anon_in_3_d_bits_sink_0 = anonIn_3_d_bits_sink; // @[Xbar.scala:74:9]
wire in_3_d_bits_denied; // @[Xbar.scala:159:18]
assign auto_anon_in_3_d_bits_denied_0 = anonIn_3_d_bits_denied; // @[Xbar.scala:74:9]
wire [63:0] in_3_d_bits_data; // @[Xbar.scala:159:18]
assign auto_anon_in_3_d_bits_data_0 = anonIn_3_d_bits_data; // @[Xbar.scala:74:9]
wire in_3_d_bits_corrupt; // @[Xbar.scala:159:18]
assign auto_anon_in_3_d_bits_corrupt_0 = anonIn_3_d_bits_corrupt; // @[Xbar.scala:74:9]
wire out_0_a_ready = anonOut_a_ready; // @[Xbar.scala:216:19]
wire out_0_a_valid; // @[Xbar.scala:216:19]
assign auto_anon_out_a_valid_0 = anonOut_a_valid; // @[Xbar.scala:74:9]
wire [2:0] out_0_a_bits_opcode; // @[Xbar.scala:216:19]
assign auto_anon_out_a_bits_opcode_0 = anonOut_a_bits_opcode; // @[Xbar.scala:74:9]
wire [2:0] out_0_a_bits_param; // @[Xbar.scala:216:19]
assign auto_anon_out_a_bits_param_0 = anonOut_a_bits_param; // @[Xbar.scala:74:9]
wire [3:0] out_0_a_bits_size; // @[Xbar.scala:216:19]
assign auto_anon_out_a_bits_size_0 = anonOut_a_bits_size; // @[Xbar.scala:74:9]
wire [5:0] out_0_a_bits_source; // @[Xbar.scala:216:19]
assign auto_anon_out_a_bits_source_0 = anonOut_a_bits_source; // @[Xbar.scala:74:9]
wire [31:0] out_0_a_bits_address; // @[Xbar.scala:216:19]
assign auto_anon_out_a_bits_address_0 = anonOut_a_bits_address; // @[Xbar.scala:74:9]
wire [7:0] out_0_a_bits_mask; // @[Xbar.scala:216:19]
assign auto_anon_out_a_bits_mask_0 = anonOut_a_bits_mask; // @[Xbar.scala:74:9]
wire [63:0] out_0_a_bits_data; // @[Xbar.scala:216:19]
assign auto_anon_out_a_bits_data_0 = anonOut_a_bits_data; // @[Xbar.scala:74:9]
wire out_0_a_bits_corrupt; // @[Xbar.scala:216:19]
assign auto_anon_out_a_bits_corrupt_0 = anonOut_a_bits_corrupt; // @[Xbar.scala:74:9]
wire out_0_d_ready; // @[Xbar.scala:216:19]
assign auto_anon_out_d_ready_0 = anonOut_d_ready; // @[Xbar.scala:74:9]
wire out_0_d_valid = anonOut_d_valid; // @[Xbar.scala:216:19]
wire [2:0] out_0_d_bits_opcode = anonOut_d_bits_opcode; // @[Xbar.scala:216:19]
wire [1:0] out_0_d_bits_param = anonOut_d_bits_param; // @[Xbar.scala:216:19]
wire [3:0] out_0_d_bits_size = anonOut_d_bits_size; // @[Xbar.scala:216:19]
wire [5:0] out_0_d_bits_source = anonOut_d_bits_source; // @[Xbar.scala:216:19]
wire [3:0] _out_0_d_bits_sink_T = anonOut_d_bits_sink; // @[Xbar.scala:251:53]
wire out_0_d_bits_denied = anonOut_d_bits_denied; // @[Xbar.scala:216:19]
wire [63:0] out_0_d_bits_data = anonOut_d_bits_data; // @[Xbar.scala:216:19]
wire out_0_d_bits_corrupt = anonOut_d_bits_corrupt; // @[Xbar.scala:216:19]
wire portsAOI_filtered_0_ready; // @[Xbar.scala:352:24]
assign anonIn_a_ready = in_0_a_ready; // @[Xbar.scala:159:18]
wire _portsAOI_filtered_0_valid_T_1 = in_0_a_valid; // @[Xbar.scala:159:18, :355:40]
wire [2:0] portsAOI_filtered_0_bits_opcode = in_0_a_bits_opcode; // @[Xbar.scala:159:18, :352:24]
wire [3:0] portsAOI_filtered_0_bits_size = in_0_a_bits_size; // @[Xbar.scala:159:18, :352:24]
wire [31:0] _requestAIO_T = in_0_a_bits_address; // @[Xbar.scala:159:18]
wire [31:0] portsAOI_filtered_0_bits_address = in_0_a_bits_address; // @[Xbar.scala:159:18, :352:24]
wire [7:0] portsAOI_filtered_0_bits_mask = in_0_a_bits_mask; // @[Xbar.scala:159:18, :352:24]
wire [63:0] portsAOI_filtered_0_bits_data = in_0_a_bits_data; // @[Xbar.scala:159:18, :352:24]
wire portsAOI_filtered_0_bits_corrupt = in_0_a_bits_corrupt; // @[Xbar.scala:159:18, :352:24]
wire portsDIO_filtered_0_ready = in_0_d_ready; // @[Xbar.scala:159:18, :352:24]
wire portsDIO_filtered_0_valid; // @[Xbar.scala:352:24]
assign anonIn_d_valid = in_0_d_valid; // @[Xbar.scala:159:18]
wire [2:0] portsDIO_filtered_0_bits_opcode; // @[Xbar.scala:352:24]
assign anonIn_d_bits_opcode = in_0_d_bits_opcode; // @[Xbar.scala:159:18]
wire [1:0] portsDIO_filtered_0_bits_param; // @[Xbar.scala:352:24]
assign anonIn_d_bits_param = in_0_d_bits_param; // @[Xbar.scala:159:18]
wire [3:0] portsDIO_filtered_0_bits_size; // @[Xbar.scala:352:24]
assign anonIn_d_bits_size = in_0_d_bits_size; // @[Xbar.scala:159:18]
wire [5:0] portsDIO_filtered_0_bits_source; // @[Xbar.scala:352:24]
wire [3:0] portsDIO_filtered_0_bits_sink; // @[Xbar.scala:352:24]
assign anonIn_d_bits_sink = in_0_d_bits_sink; // @[Xbar.scala:159:18]
wire portsDIO_filtered_0_bits_denied; // @[Xbar.scala:352:24]
assign anonIn_d_bits_denied = in_0_d_bits_denied; // @[Xbar.scala:159:18]
wire [63:0] portsDIO_filtered_0_bits_data; // @[Xbar.scala:352:24]
assign anonIn_d_bits_data = in_0_d_bits_data; // @[Xbar.scala:159:18]
wire portsDIO_filtered_0_bits_corrupt; // @[Xbar.scala:352:24]
assign anonIn_d_bits_corrupt = in_0_d_bits_corrupt; // @[Xbar.scala:159:18]
wire portsAOI_filtered_1_0_ready; // @[Xbar.scala:352:24]
assign anonIn_1_a_ready = in_1_a_ready; // @[Xbar.scala:159:18]
wire _portsAOI_filtered_0_valid_T_3 = in_1_a_valid; // @[Xbar.scala:159:18, :355:40]
wire [2:0] portsAOI_filtered_1_0_bits_opcode = in_1_a_bits_opcode; // @[Xbar.scala:159:18, :352:24]
wire [2:0] portsAOI_filtered_1_0_bits_param = in_1_a_bits_param; // @[Xbar.scala:159:18, :352:24]
wire [3:0] portsAOI_filtered_1_0_bits_size = in_1_a_bits_size; // @[Xbar.scala:159:18, :352:24]
wire [5:0] portsAOI_filtered_1_0_bits_source = in_1_a_bits_source; // @[Xbar.scala:159:18, :352:24]
wire [31:0] _requestAIO_T_5 = in_1_a_bits_address; // @[Xbar.scala:159:18]
wire [31:0] portsAOI_filtered_1_0_bits_address = in_1_a_bits_address; // @[Xbar.scala:159:18, :352:24]
wire [7:0] portsAOI_filtered_1_0_bits_mask = in_1_a_bits_mask; // @[Xbar.scala:159:18, :352:24]
wire [63:0] portsAOI_filtered_1_0_bits_data = in_1_a_bits_data; // @[Xbar.scala:159:18, :352:24]
wire portsAOI_filtered_1_0_bits_corrupt = in_1_a_bits_corrupt; // @[Xbar.scala:159:18, :352:24]
wire portsDIO_filtered_1_ready = in_1_d_ready; // @[Xbar.scala:159:18, :352:24]
wire portsDIO_filtered_1_valid; // @[Xbar.scala:352:24]
assign anonIn_1_d_valid = in_1_d_valid; // @[Xbar.scala:159:18]
wire [2:0] portsDIO_filtered_1_bits_opcode; // @[Xbar.scala:352:24]
assign anonIn_1_d_bits_opcode = in_1_d_bits_opcode; // @[Xbar.scala:159:18]
wire [1:0] portsDIO_filtered_1_bits_param; // @[Xbar.scala:352:24]
assign anonIn_1_d_bits_param = in_1_d_bits_param; // @[Xbar.scala:159:18]
wire [3:0] portsDIO_filtered_1_bits_size; // @[Xbar.scala:352:24]
assign anonIn_1_d_bits_size = in_1_d_bits_size; // @[Xbar.scala:159:18]
wire [5:0] portsDIO_filtered_1_bits_source; // @[Xbar.scala:352:24]
wire [3:0] portsDIO_filtered_1_bits_sink; // @[Xbar.scala:352:24]
assign anonIn_1_d_bits_sink = in_1_d_bits_sink; // @[Xbar.scala:159:18]
wire portsDIO_filtered_1_bits_denied; // @[Xbar.scala:352:24]
assign anonIn_1_d_bits_denied = in_1_d_bits_denied; // @[Xbar.scala:159:18]
wire [63:0] portsDIO_filtered_1_bits_data; // @[Xbar.scala:352:24]
assign anonIn_1_d_bits_data = in_1_d_bits_data; // @[Xbar.scala:159:18]
wire portsDIO_filtered_1_bits_corrupt; // @[Xbar.scala:352:24]
assign anonIn_1_d_bits_corrupt = in_1_d_bits_corrupt; // @[Xbar.scala:159:18]
wire portsAOI_filtered_2_0_ready; // @[Xbar.scala:352:24]
assign anonIn_2_a_ready = in_2_a_ready; // @[Xbar.scala:159:18]
wire _portsAOI_filtered_0_valid_T_5 = in_2_a_valid; // @[Xbar.scala:159:18, :355:40]
wire [3:0] portsAOI_filtered_2_0_bits_size = in_2_a_bits_size; // @[Xbar.scala:159:18, :352:24]
wire [5:0] portsAOI_filtered_2_0_bits_source = in_2_a_bits_source; // @[Xbar.scala:159:18, :352:24]
wire [31:0] _requestAIO_T_10 = in_2_a_bits_address; // @[Xbar.scala:159:18]
wire [31:0] portsAOI_filtered_2_0_bits_address = in_2_a_bits_address; // @[Xbar.scala:159:18, :352:24]
wire [7:0] portsAOI_filtered_2_0_bits_mask = in_2_a_bits_mask; // @[Xbar.scala:159:18, :352:24]
wire portsDIO_filtered_2_valid; // @[Xbar.scala:352:24]
assign anonIn_2_d_valid = in_2_d_valid; // @[Xbar.scala:159:18]
wire [2:0] portsDIO_filtered_2_bits_opcode; // @[Xbar.scala:352:24]
assign anonIn_2_d_bits_opcode = in_2_d_bits_opcode; // @[Xbar.scala:159:18]
wire [1:0] portsDIO_filtered_2_bits_param; // @[Xbar.scala:352:24]
assign anonIn_2_d_bits_param = in_2_d_bits_param; // @[Xbar.scala:159:18]
wire [3:0] portsDIO_filtered_2_bits_size; // @[Xbar.scala:352:24]
assign anonIn_2_d_bits_size = in_2_d_bits_size; // @[Xbar.scala:159:18]
wire [5:0] portsDIO_filtered_2_bits_source; // @[Xbar.scala:352:24]
wire [3:0] portsDIO_filtered_2_bits_sink; // @[Xbar.scala:352:24]
assign anonIn_2_d_bits_sink = in_2_d_bits_sink; // @[Xbar.scala:159:18]
wire portsDIO_filtered_2_bits_denied; // @[Xbar.scala:352:24]
assign anonIn_2_d_bits_denied = in_2_d_bits_denied; // @[Xbar.scala:159:18]
wire [63:0] portsDIO_filtered_2_bits_data; // @[Xbar.scala:352:24]
assign anonIn_2_d_bits_data = in_2_d_bits_data; // @[Xbar.scala:159:18]
wire portsDIO_filtered_2_bits_corrupt; // @[Xbar.scala:352:24]
assign anonIn_2_d_bits_corrupt = in_2_d_bits_corrupt; // @[Xbar.scala:159:18]
wire portsAOI_filtered_3_0_ready; // @[Xbar.scala:352:24]
assign anonIn_3_a_ready = in_3_a_ready; // @[Xbar.scala:159:18]
wire _portsAOI_filtered_0_valid_T_7 = in_3_a_valid; // @[Xbar.scala:159:18, :355:40]
wire [2:0] portsAOI_filtered_3_0_bits_opcode = in_3_a_bits_opcode; // @[Xbar.scala:159:18, :352:24]
wire [3:0] portsAOI_filtered_3_0_bits_size = in_3_a_bits_size; // @[Xbar.scala:159:18, :352:24]
// @[Xbar.scala:159:18, :352:24] wire [5:0] portsAOI_filtered_3_0_bits_source = in_3_a_bits_source; // @[Xbar.scala:159:18, :352:24] wire [31:0] _requestAIO_T_15 = in_3_a_bits_address; // @[Xbar.scala:159:18] wire [31:0] portsAOI_filtered_3_0_bits_address = in_3_a_bits_address; // @[Xbar.scala:159:18, :352:24] wire [7:0] portsAOI_filtered_3_0_bits_mask = in_3_a_bits_mask; // @[Xbar.scala:159:18, :352:24] wire [63:0] portsAOI_filtered_3_0_bits_data = in_3_a_bits_data; // @[Xbar.scala:159:18, :352:24] wire portsDIO_filtered_3_ready = in_3_d_ready; // @[Xbar.scala:159:18, :352:24] wire portsDIO_filtered_3_valid; // @[Xbar.scala:352:24] assign anonIn_3_d_valid = in_3_d_valid; // @[Xbar.scala:159:18] wire [2:0] portsDIO_filtered_3_bits_opcode; // @[Xbar.scala:352:24] assign anonIn_3_d_bits_opcode = in_3_d_bits_opcode; // @[Xbar.scala:159:18] wire [1:0] portsDIO_filtered_3_bits_param; // @[Xbar.scala:352:24] assign anonIn_3_d_bits_param = in_3_d_bits_param; // @[Xbar.scala:159:18] wire [3:0] portsDIO_filtered_3_bits_size; // @[Xbar.scala:352:24] assign anonIn_3_d_bits_size = in_3_d_bits_size; // @[Xbar.scala:159:18] wire [5:0] portsDIO_filtered_3_bits_source; // @[Xbar.scala:352:24] wire [3:0] portsDIO_filtered_3_bits_sink; // @[Xbar.scala:352:24] assign anonIn_3_d_bits_sink = in_3_d_bits_sink; // @[Xbar.scala:159:18] wire portsDIO_filtered_3_bits_denied; // @[Xbar.scala:352:24] assign anonIn_3_d_bits_denied = in_3_d_bits_denied; // @[Xbar.scala:159:18] wire [63:0] portsDIO_filtered_3_bits_data; // @[Xbar.scala:352:24] assign anonIn_3_d_bits_data = in_3_d_bits_data; // @[Xbar.scala:159:18] wire portsDIO_filtered_3_bits_corrupt; // @[Xbar.scala:352:24] assign anonIn_3_d_bits_corrupt = in_3_d_bits_corrupt; // @[Xbar.scala:159:18] wire [5:0] in_0_d_bits_source; // @[Xbar.scala:159:18] wire [5:0] in_1_d_bits_source; // @[Xbar.scala:159:18] wire [5:0] in_2_d_bits_source; // @[Xbar.scala:159:18] wire [5:0] in_3_d_bits_source; // @[Xbar.scala:159:18] assign in_1_a_bits_source = {2'h0, _in_1_a_bits_source_T}; // @[Xbar.scala:159:18, :166:{29,55}] assign _anonIn_d_bits_source_T = in_1_d_bits_source[3:0]; // @[Xbar.scala:156:69, :159:18] assign anonIn_1_d_bits_source = _anonIn_d_bits_source_T; // @[Xbar.scala:156:69] wire [4:0] _in_2_a_bits_source_T = {2'h3, anonIn_2_a_bits_source}; // @[Xbar.scala:166:55] assign in_2_a_bits_source = {1'h0, _in_2_a_bits_source_T}; // @[Xbar.scala:159:18, :166:{29,55}] assign _anonIn_d_bits_source_T_1 = in_2_d_bits_source[2:0]; // @[Xbar.scala:156:69, :159:18] assign anonIn_2_d_bits_source = _anonIn_d_bits_source_T_1; // @[Xbar.scala:156:69] wire [4:0] _in_3_a_bits_source_T = {2'h2, anonIn_3_a_bits_source}; // @[Xbar.scala:166:55] assign in_3_a_bits_source = {1'h0, _in_3_a_bits_source_T}; // @[Xbar.scala:159:18, :166:{29,55}] assign _anonIn_d_bits_source_T_2 = in_3_d_bits_source[2:0]; // @[Xbar.scala:156:69, :159:18] assign anonIn_3_d_bits_source = _anonIn_d_bits_source_T_2; // @[Xbar.scala:156:69] wire _out_0_a_valid_T_10; // @[Arbiter.scala:96:24] assign anonOut_a_valid = out_0_a_valid; // @[Xbar.scala:216:19] wire [2:0] _out_0_a_bits_WIRE_opcode; // @[Mux.scala:30:73] assign anonOut_a_bits_opcode = out_0_a_bits_opcode; // @[Xbar.scala:216:19] wire [2:0] _out_0_a_bits_WIRE_param; // @[Mux.scala:30:73] assign anonOut_a_bits_param = out_0_a_bits_param; // @[Xbar.scala:216:19] wire [3:0] _out_0_a_bits_WIRE_size; // @[Mux.scala:30:73] assign anonOut_a_bits_size = out_0_a_bits_size; // @[Xbar.scala:216:19] wire [5:0] _out_0_a_bits_WIRE_source; // @[Mux.scala:30:73] assign 
anonOut_a_bits_source = out_0_a_bits_source; // @[Xbar.scala:216:19] wire [31:0] _out_0_a_bits_WIRE_address; // @[Mux.scala:30:73] assign anonOut_a_bits_address = out_0_a_bits_address; // @[Xbar.scala:216:19] wire [7:0] _out_0_a_bits_WIRE_mask; // @[Mux.scala:30:73] assign anonOut_a_bits_mask = out_0_a_bits_mask; // @[Xbar.scala:216:19] wire [63:0] _out_0_a_bits_WIRE_data; // @[Mux.scala:30:73] assign anonOut_a_bits_data = out_0_a_bits_data; // @[Xbar.scala:216:19] wire _out_0_a_bits_WIRE_corrupt; // @[Mux.scala:30:73] assign anonOut_a_bits_corrupt = out_0_a_bits_corrupt; // @[Xbar.scala:216:19] wire _portsDIO_out_0_d_ready_WIRE; // @[Mux.scala:30:73] assign anonOut_d_ready = out_0_d_ready; // @[Xbar.scala:216:19] assign portsDIO_filtered_0_bits_opcode = out_0_d_bits_opcode; // @[Xbar.scala:216:19, :352:24] assign portsDIO_filtered_1_bits_opcode = out_0_d_bits_opcode; // @[Xbar.scala:216:19, :352:24] assign portsDIO_filtered_2_bits_opcode = out_0_d_bits_opcode; // @[Xbar.scala:216:19, :352:24] assign portsDIO_filtered_3_bits_opcode = out_0_d_bits_opcode; // @[Xbar.scala:216:19, :352:24] assign portsDIO_filtered_0_bits_param = out_0_d_bits_param; // @[Xbar.scala:216:19, :352:24] assign portsDIO_filtered_1_bits_param = out_0_d_bits_param; // @[Xbar.scala:216:19, :352:24] assign portsDIO_filtered_2_bits_param = out_0_d_bits_param; // @[Xbar.scala:216:19, :352:24] assign portsDIO_filtered_3_bits_param = out_0_d_bits_param; // @[Xbar.scala:216:19, :352:24] assign portsDIO_filtered_0_bits_size = out_0_d_bits_size; // @[Xbar.scala:216:19, :352:24] assign portsDIO_filtered_1_bits_size = out_0_d_bits_size; // @[Xbar.scala:216:19, :352:24] assign portsDIO_filtered_2_bits_size = out_0_d_bits_size; // @[Xbar.scala:216:19, :352:24] assign portsDIO_filtered_3_bits_size = out_0_d_bits_size; // @[Xbar.scala:216:19, :352:24] wire [5:0] _requestDOI_uncommonBits_T = out_0_d_bits_source; // @[Xbar.scala:216:19] wire [5:0] _requestDOI_uncommonBits_T_1 = out_0_d_bits_source; // @[Xbar.scala:216:19] wire [5:0] _requestDOI_uncommonBits_T_2 = out_0_d_bits_source; // @[Xbar.scala:216:19] assign portsDIO_filtered_0_bits_source = out_0_d_bits_source; // @[Xbar.scala:216:19, :352:24] assign portsDIO_filtered_1_bits_source = out_0_d_bits_source; // @[Xbar.scala:216:19, :352:24] assign portsDIO_filtered_2_bits_source = out_0_d_bits_source; // @[Xbar.scala:216:19, :352:24] assign portsDIO_filtered_3_bits_source = out_0_d_bits_source; // @[Xbar.scala:216:19, :352:24] assign portsDIO_filtered_0_bits_sink = out_0_d_bits_sink; // @[Xbar.scala:216:19, :352:24] assign portsDIO_filtered_1_bits_sink = out_0_d_bits_sink; // @[Xbar.scala:216:19, :352:24] assign portsDIO_filtered_2_bits_sink = out_0_d_bits_sink; // @[Xbar.scala:216:19, :352:24] assign portsDIO_filtered_3_bits_sink = out_0_d_bits_sink; // @[Xbar.scala:216:19, :352:24] assign portsDIO_filtered_0_bits_denied = out_0_d_bits_denied; // @[Xbar.scala:216:19, :352:24] assign portsDIO_filtered_1_bits_denied = out_0_d_bits_denied; // @[Xbar.scala:216:19, :352:24] assign portsDIO_filtered_2_bits_denied = out_0_d_bits_denied; // @[Xbar.scala:216:19, :352:24] assign portsDIO_filtered_3_bits_denied = out_0_d_bits_denied; // @[Xbar.scala:216:19, :352:24] assign portsDIO_filtered_0_bits_data = out_0_d_bits_data; // @[Xbar.scala:216:19, :352:24] assign portsDIO_filtered_1_bits_data = out_0_d_bits_data; // @[Xbar.scala:216:19, :352:24] assign portsDIO_filtered_2_bits_data = out_0_d_bits_data; // @[Xbar.scala:216:19, :352:24] assign portsDIO_filtered_3_bits_data = out_0_d_bits_data; 
// @[Xbar.scala:216:19, :352:24] assign portsDIO_filtered_0_bits_corrupt = out_0_d_bits_corrupt; // @[Xbar.scala:216:19, :352:24] assign portsDIO_filtered_1_bits_corrupt = out_0_d_bits_corrupt; // @[Xbar.scala:216:19, :352:24] assign portsDIO_filtered_2_bits_corrupt = out_0_d_bits_corrupt; // @[Xbar.scala:216:19, :352:24] assign portsDIO_filtered_3_bits_corrupt = out_0_d_bits_corrupt; // @[Xbar.scala:216:19, :352:24] assign out_0_d_bits_sink = _out_0_d_bits_sink_T; // @[Xbar.scala:216:19, :251:53] wire [32:0] _requestAIO_T_1 = {1'h0, _requestAIO_T}; // @[Parameters.scala:137:{31,41}] wire [32:0] _requestAIO_T_6 = {1'h0, _requestAIO_T_5}; // @[Parameters.scala:137:{31,41}] wire [32:0] _requestAIO_T_11 = {1'h0, _requestAIO_T_10}; // @[Parameters.scala:137:{31,41}] wire [32:0] _requestAIO_T_16 = {1'h0, _requestAIO_T_15}; // @[Parameters.scala:137:{31,41}] wire requestDOI_0_0 = out_0_d_bits_source == 6'h20; // @[Xbar.scala:216:19] wire _portsDIO_filtered_0_valid_T = requestDOI_0_0; // @[Xbar.scala:355:54] wire [3:0] requestDOI_uncommonBits = _requestDOI_uncommonBits_T[3:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] _requestDOI_T = out_0_d_bits_source[5:4]; // @[Xbar.scala:216:19] wire _requestDOI_T_1 = _requestDOI_T == 2'h0; // @[Parameters.scala:54:{10,32}] wire _requestDOI_T_3 = _requestDOI_T_1; // @[Parameters.scala:54:{32,67}] wire requestDOI_0_1 = _requestDOI_T_3; // @[Parameters.scala:54:67, :56:48] wire _portsDIO_filtered_1_valid_T = requestDOI_0_1; // @[Xbar.scala:355:54] wire [2:0] requestDOI_uncommonBits_1 = _requestDOI_uncommonBits_T_1[2:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] _requestDOI_T_5 = out_0_d_bits_source[5:3]; // @[Xbar.scala:216:19] wire [2:0] _requestDOI_T_10 = out_0_d_bits_source[5:3]; // @[Xbar.scala:216:19] wire _requestDOI_T_6 = _requestDOI_T_5 == 3'h3; // @[Parameters.scala:54:{10,32}] wire _requestDOI_T_8 = _requestDOI_T_6; // @[Parameters.scala:54:{32,67}] wire requestDOI_0_2 = _requestDOI_T_8; // @[Parameters.scala:54:67, :56:48] wire _portsDIO_filtered_2_valid_T = requestDOI_0_2; // @[Xbar.scala:355:54] wire _portsDIO_out_0_d_ready_T_2 = requestDOI_0_2; // @[Mux.scala:30:73] wire [2:0] requestDOI_uncommonBits_2 = _requestDOI_uncommonBits_T_2[2:0]; // @[Parameters.scala:52:{29,56}] wire _requestDOI_T_11 = _requestDOI_T_10 == 3'h2; // @[Parameters.scala:54:{10,32}] wire _requestDOI_T_13 = _requestDOI_T_11; // @[Parameters.scala:54:{32,67}] wire requestDOI_0_3 = _requestDOI_T_13; // @[Parameters.scala:54:67, :56:48] wire _portsDIO_filtered_3_valid_T = requestDOI_0_3; // @[Xbar.scala:355:54] wire [26:0] _beatsAI_decode_T = 27'hFFF << in_0_a_bits_size; // @[package.scala:243:71] wire [11:0] _beatsAI_decode_T_1 = _beatsAI_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _beatsAI_decode_T_2 = ~_beatsAI_decode_T_1; // @[package.scala:243:{46,76}] wire [8:0] beatsAI_decode = _beatsAI_decode_T_2[11:3]; // @[package.scala:243:46] wire _beatsAI_opdata_T = in_0_a_bits_opcode[2]; // @[Xbar.scala:159:18] wire beatsAI_opdata = ~_beatsAI_opdata_T; // @[Edges.scala:92:{28,37}] wire [8:0] beatsAI_0 = beatsAI_opdata ? 
beatsAI_decode : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14] wire [26:0] _beatsAI_decode_T_3 = 27'hFFF << in_1_a_bits_size; // @[package.scala:243:71] wire [11:0] _beatsAI_decode_T_4 = _beatsAI_decode_T_3[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _beatsAI_decode_T_5 = ~_beatsAI_decode_T_4; // @[package.scala:243:{46,76}] wire [8:0] beatsAI_decode_1 = _beatsAI_decode_T_5[11:3]; // @[package.scala:243:46] wire _beatsAI_opdata_T_1 = in_1_a_bits_opcode[2]; // @[Xbar.scala:159:18] wire beatsAI_opdata_1 = ~_beatsAI_opdata_T_1; // @[Edges.scala:92:{28,37}] wire [8:0] beatsAI_1 = beatsAI_opdata_1 ? beatsAI_decode_1 : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14] wire [26:0] _beatsAI_decode_T_6 = 27'hFFF << in_2_a_bits_size; // @[package.scala:243:71] wire [11:0] _beatsAI_decode_T_7 = _beatsAI_decode_T_6[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _beatsAI_decode_T_8 = ~_beatsAI_decode_T_7; // @[package.scala:243:{46,76}] wire [8:0] beatsAI_decode_2 = _beatsAI_decode_T_8[11:3]; // @[package.scala:243:46] wire [26:0] _beatsAI_decode_T_9 = 27'hFFF << in_3_a_bits_size; // @[package.scala:243:71] wire [11:0] _beatsAI_decode_T_10 = _beatsAI_decode_T_9[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _beatsAI_decode_T_11 = ~_beatsAI_decode_T_10; // @[package.scala:243:{46,76}] wire [8:0] beatsAI_decode_3 = _beatsAI_decode_T_11[11:3]; // @[package.scala:243:46] wire _beatsAI_opdata_T_3 = in_3_a_bits_opcode[2]; // @[Xbar.scala:159:18] wire beatsAI_opdata_3 = ~_beatsAI_opdata_T_3; // @[Edges.scala:92:{28,37}] wire [8:0] beatsAI_3 = beatsAI_opdata_3 ? beatsAI_decode_3 : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14] wire [26:0] _beatsDO_decode_T = 27'hFFF << out_0_d_bits_size; // @[package.scala:243:71] wire [11:0] _beatsDO_decode_T_1 = _beatsDO_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _beatsDO_decode_T_2 = ~_beatsDO_decode_T_1; // @[package.scala:243:{46,76}] wire [8:0] beatsDO_decode = _beatsDO_decode_T_2[11:3]; // @[package.scala:243:46] wire beatsDO_opdata = out_0_d_bits_opcode[0]; // @[Xbar.scala:216:19] wire [8:0] beatsDO_0 = beatsDO_opdata ? 
beatsDO_decode : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14] wire _filtered_0_ready_T; // @[Arbiter.scala:94:31] assign in_0_a_ready = portsAOI_filtered_0_ready; // @[Xbar.scala:159:18, :352:24] wire portsAOI_filtered_0_valid; // @[Xbar.scala:352:24] assign portsAOI_filtered_0_valid = _portsAOI_filtered_0_valid_T_1; // @[Xbar.scala:352:24, :355:40] wire _filtered_0_ready_T_1; // @[Arbiter.scala:94:31] assign in_1_a_ready = portsAOI_filtered_1_0_ready; // @[Xbar.scala:159:18, :352:24] wire portsAOI_filtered_1_0_valid; // @[Xbar.scala:352:24] assign portsAOI_filtered_1_0_valid = _portsAOI_filtered_0_valid_T_3; // @[Xbar.scala:352:24, :355:40] wire _filtered_0_ready_T_2; // @[Arbiter.scala:94:31] assign in_2_a_ready = portsAOI_filtered_2_0_ready; // @[Xbar.scala:159:18, :352:24] wire portsAOI_filtered_2_0_valid; // @[Xbar.scala:352:24] assign portsAOI_filtered_2_0_valid = _portsAOI_filtered_0_valid_T_5; // @[Xbar.scala:352:24, :355:40] wire _filtered_0_ready_T_3; // @[Arbiter.scala:94:31] assign in_3_a_ready = portsAOI_filtered_3_0_ready; // @[Xbar.scala:159:18, :352:24] wire portsAOI_filtered_3_0_valid; // @[Xbar.scala:352:24] assign portsAOI_filtered_3_0_valid = _portsAOI_filtered_0_valid_T_7; // @[Xbar.scala:352:24, :355:40] wire _portsDIO_filtered_0_valid_T_1; // @[Xbar.scala:355:40] assign in_0_d_valid = portsDIO_filtered_0_valid; // @[Xbar.scala:159:18, :352:24] assign in_0_d_bits_opcode = portsDIO_filtered_0_bits_opcode; // @[Xbar.scala:159:18, :352:24] assign in_0_d_bits_param = portsDIO_filtered_0_bits_param; // @[Xbar.scala:159:18, :352:24] assign in_0_d_bits_size = portsDIO_filtered_0_bits_size; // @[Xbar.scala:159:18, :352:24] assign in_0_d_bits_source = portsDIO_filtered_0_bits_source; // @[Xbar.scala:159:18, :352:24] assign in_0_d_bits_sink = portsDIO_filtered_0_bits_sink; // @[Xbar.scala:159:18, :352:24] assign in_0_d_bits_denied = portsDIO_filtered_0_bits_denied; // @[Xbar.scala:159:18, :352:24] assign in_0_d_bits_data = portsDIO_filtered_0_bits_data; // @[Xbar.scala:159:18, :352:24] assign in_0_d_bits_corrupt = portsDIO_filtered_0_bits_corrupt; // @[Xbar.scala:159:18, :352:24] wire _portsDIO_filtered_1_valid_T_1; // @[Xbar.scala:355:40] assign in_1_d_valid = portsDIO_filtered_1_valid; // @[Xbar.scala:159:18, :352:24] assign in_1_d_bits_opcode = portsDIO_filtered_1_bits_opcode; // @[Xbar.scala:159:18, :352:24] assign in_1_d_bits_param = portsDIO_filtered_1_bits_param; // @[Xbar.scala:159:18, :352:24] assign in_1_d_bits_size = portsDIO_filtered_1_bits_size; // @[Xbar.scala:159:18, :352:24] assign in_1_d_bits_source = portsDIO_filtered_1_bits_source; // @[Xbar.scala:159:18, :352:24] assign in_1_d_bits_sink = portsDIO_filtered_1_bits_sink; // @[Xbar.scala:159:18, :352:24] assign in_1_d_bits_denied = portsDIO_filtered_1_bits_denied; // @[Xbar.scala:159:18, :352:24] assign in_1_d_bits_data = portsDIO_filtered_1_bits_data; // @[Xbar.scala:159:18, :352:24] assign in_1_d_bits_corrupt = portsDIO_filtered_1_bits_corrupt; // @[Xbar.scala:159:18, :352:24] wire _portsDIO_filtered_2_valid_T_1; // @[Xbar.scala:355:40] assign in_2_d_valid = portsDIO_filtered_2_valid; // @[Xbar.scala:159:18, :352:24] assign in_2_d_bits_opcode = portsDIO_filtered_2_bits_opcode; // @[Xbar.scala:159:18, :352:24] assign in_2_d_bits_param = portsDIO_filtered_2_bits_param; // @[Xbar.scala:159:18, :352:24] assign in_2_d_bits_size = portsDIO_filtered_2_bits_size; // @[Xbar.scala:159:18, :352:24] assign in_2_d_bits_source = portsDIO_filtered_2_bits_source; // @[Xbar.scala:159:18, :352:24] assign in_2_d_bits_sink = 
portsDIO_filtered_2_bits_sink; // @[Xbar.scala:159:18, :352:24] assign in_2_d_bits_denied = portsDIO_filtered_2_bits_denied; // @[Xbar.scala:159:18, :352:24] assign in_2_d_bits_data = portsDIO_filtered_2_bits_data; // @[Xbar.scala:159:18, :352:24] assign in_2_d_bits_corrupt = portsDIO_filtered_2_bits_corrupt; // @[Xbar.scala:159:18, :352:24] wire _portsDIO_filtered_3_valid_T_1; // @[Xbar.scala:355:40] assign in_3_d_valid = portsDIO_filtered_3_valid; // @[Xbar.scala:159:18, :352:24] assign in_3_d_bits_opcode = portsDIO_filtered_3_bits_opcode; // @[Xbar.scala:159:18, :352:24] assign in_3_d_bits_param = portsDIO_filtered_3_bits_param; // @[Xbar.scala:159:18, :352:24] assign in_3_d_bits_size = portsDIO_filtered_3_bits_size; // @[Xbar.scala:159:18, :352:24] assign in_3_d_bits_source = portsDIO_filtered_3_bits_source; // @[Xbar.scala:159:18, :352:24] assign in_3_d_bits_sink = portsDIO_filtered_3_bits_sink; // @[Xbar.scala:159:18, :352:24] assign in_3_d_bits_denied = portsDIO_filtered_3_bits_denied; // @[Xbar.scala:159:18, :352:24] assign in_3_d_bits_data = portsDIO_filtered_3_bits_data; // @[Xbar.scala:159:18, :352:24] assign in_3_d_bits_corrupt = portsDIO_filtered_3_bits_corrupt; // @[Xbar.scala:159:18, :352:24] assign _portsDIO_filtered_0_valid_T_1 = out_0_d_valid & _portsDIO_filtered_0_valid_T; // @[Xbar.scala:216:19, :355:{40,54}] assign portsDIO_filtered_0_valid = _portsDIO_filtered_0_valid_T_1; // @[Xbar.scala:352:24, :355:40] assign _portsDIO_filtered_1_valid_T_1 = out_0_d_valid & _portsDIO_filtered_1_valid_T; // @[Xbar.scala:216:19, :355:{40,54}] assign portsDIO_filtered_1_valid = _portsDIO_filtered_1_valid_T_1; // @[Xbar.scala:352:24, :355:40] assign _portsDIO_filtered_2_valid_T_1 = out_0_d_valid & _portsDIO_filtered_2_valid_T; // @[Xbar.scala:216:19, :355:{40,54}] assign portsDIO_filtered_2_valid = _portsDIO_filtered_2_valid_T_1; // @[Xbar.scala:352:24, :355:40] assign _portsDIO_filtered_3_valid_T_1 = out_0_d_valid & _portsDIO_filtered_3_valid_T; // @[Xbar.scala:216:19, :355:{40,54}] assign portsDIO_filtered_3_valid = _portsDIO_filtered_3_valid_T_1; // @[Xbar.scala:352:24, :355:40] wire _portsDIO_out_0_d_ready_T = requestDOI_0_0 & portsDIO_filtered_0_ready; // @[Mux.scala:30:73] wire _portsDIO_out_0_d_ready_T_1 = requestDOI_0_1 & portsDIO_filtered_1_ready; // @[Mux.scala:30:73] wire _portsDIO_out_0_d_ready_T_3 = requestDOI_0_3 & portsDIO_filtered_3_ready; // @[Mux.scala:30:73] wire _portsDIO_out_0_d_ready_T_4 = _portsDIO_out_0_d_ready_T | _portsDIO_out_0_d_ready_T_1; // @[Mux.scala:30:73] wire _portsDIO_out_0_d_ready_T_5 = _portsDIO_out_0_d_ready_T_4 | _portsDIO_out_0_d_ready_T_2; // @[Mux.scala:30:73] wire _portsDIO_out_0_d_ready_T_6 = _portsDIO_out_0_d_ready_T_5 | _portsDIO_out_0_d_ready_T_3; // @[Mux.scala:30:73] assign _portsDIO_out_0_d_ready_WIRE = _portsDIO_out_0_d_ready_T_6; // @[Mux.scala:30:73] assign out_0_d_ready = _portsDIO_out_0_d_ready_WIRE; // @[Mux.scala:30:73] reg [8:0] beatsLeft; // @[Arbiter.scala:60:30] wire idle = beatsLeft == 9'h0; // @[Arbiter.scala:60:30, :61:28] wire latch = idle & out_0_a_ready; // @[Xbar.scala:216:19] wire [1:0] readys_lo = {portsAOI_filtered_1_0_valid, portsAOI_filtered_0_valid}; // @[Xbar.scala:352:24] wire [1:0] readys_hi = {portsAOI_filtered_3_0_valid, portsAOI_filtered_2_0_valid}; // @[Xbar.scala:352:24] wire [3:0] _readys_T = {readys_hi, readys_lo}; // @[Arbiter.scala:68:51] wire [3:0] readys_valid = _readys_T; // @[Arbiter.scala:21:23, :68:51] wire _readys_T_1 = readys_valid == _readys_T; // @[Arbiter.scala:21:23, :22:19, :68:51] wire 
_readys_T_3 = ~_readys_T_2; // @[Arbiter.scala:22:12] wire _readys_T_4 = ~_readys_T_1; // @[Arbiter.scala:22:{12,19}] reg [3:0] readys_mask; // @[Arbiter.scala:23:23] wire [3:0] _readys_filter_T = ~readys_mask; // @[Arbiter.scala:23:23, :24:30] wire [3:0] _readys_filter_T_1 = readys_valid & _readys_filter_T; // @[Arbiter.scala:21:23, :24:{28,30}] wire [7:0] readys_filter = {_readys_filter_T_1, readys_valid}; // @[Arbiter.scala:21:23, :24:{21,28}] wire [6:0] _readys_unready_T = readys_filter[7:1]; // @[package.scala:262:48] wire [7:0] _readys_unready_T_1 = {readys_filter[7], readys_filter[6:0] | _readys_unready_T}; // @[package.scala:262:{43,48}] wire [5:0] _readys_unready_T_2 = _readys_unready_T_1[7:2]; // @[package.scala:262:{43,48}] wire [7:0] _readys_unready_T_3 = {_readys_unready_T_1[7:6], _readys_unready_T_1[5:0] | _readys_unready_T_2}; // @[package.scala:262:{43,48}] wire [7:0] _readys_unready_T_4 = _readys_unready_T_3; // @[package.scala:262:43, :263:17] wire [6:0] _readys_unready_T_5 = _readys_unready_T_4[7:1]; // @[package.scala:263:17] wire [7:0] _readys_unready_T_6 = {readys_mask, 4'h0}; // @[Arbiter.scala:23:23, :25:66] wire [7:0] readys_unready = {1'h0, _readys_unready_T_5} | _readys_unready_T_6; // @[Arbiter.scala:25:{52,58,66}] wire [3:0] _readys_readys_T = readys_unready[7:4]; // @[Arbiter.scala:25:58, :26:29] wire [3:0] _readys_readys_T_1 = readys_unready[3:0]; // @[Arbiter.scala:25:58, :26:48] wire [3:0] _readys_readys_T_2 = _readys_readys_T & _readys_readys_T_1; // @[Arbiter.scala:26:{29,39,48}] wire [3:0] readys_readys = ~_readys_readys_T_2; // @[Arbiter.scala:26:{18,39}] wire [3:0] _readys_T_7 = readys_readys; // @[Arbiter.scala:26:18, :30:11] wire _readys_T_5 = |readys_valid; // @[Arbiter.scala:21:23, :27:27] wire _readys_T_6 = latch & _readys_T_5; // @[Arbiter.scala:27:{18,27}, :62:24] wire [3:0] _readys_mask_T = readys_readys & readys_valid; // @[Arbiter.scala:21:23, :26:18, :28:29] wire [4:0] _readys_mask_T_1 = {_readys_mask_T, 1'h0}; // @[package.scala:253:48] wire [3:0] _readys_mask_T_2 = _readys_mask_T_1[3:0]; // @[package.scala:253:{48,53}] wire [3:0] _readys_mask_T_3 = _readys_mask_T | _readys_mask_T_2; // @[package.scala:253:{43,53}] wire [5:0] _readys_mask_T_4 = {_readys_mask_T_3, 2'h0}; // @[package.scala:253:{43,48}] wire [3:0] _readys_mask_T_5 = _readys_mask_T_4[3:0]; // @[package.scala:253:{48,53}] wire [3:0] _readys_mask_T_6 = _readys_mask_T_3 | _readys_mask_T_5; // @[package.scala:253:{43,53}] wire [3:0] _readys_mask_T_7 = _readys_mask_T_6; // @[package.scala:253:43, :254:17] wire _readys_T_8 = _readys_T_7[0]; // @[Arbiter.scala:30:11, :68:76] wire readys_0 = _readys_T_8; // @[Arbiter.scala:68:{27,76}] wire _readys_T_9 = _readys_T_7[1]; // @[Arbiter.scala:30:11, :68:76] wire readys_1 = _readys_T_9; // @[Arbiter.scala:68:{27,76}] wire _readys_T_10 = _readys_T_7[2]; // @[Arbiter.scala:30:11, :68:76] wire readys_2 = _readys_T_10; // @[Arbiter.scala:68:{27,76}] wire _readys_T_11 = _readys_T_7[3]; // @[Arbiter.scala:30:11, :68:76] wire readys_3 = _readys_T_11; // @[Arbiter.scala:68:{27,76}] wire _winner_T = readys_0 & portsAOI_filtered_0_valid; // @[Xbar.scala:352:24] wire winner_0 = _winner_T; // @[Arbiter.scala:71:{27,69}] wire _winner_T_1 = readys_1 & portsAOI_filtered_1_0_valid; // @[Xbar.scala:352:24] wire winner_1 = _winner_T_1; // @[Arbiter.scala:71:{27,69}] wire _winner_T_2 = readys_2 & portsAOI_filtered_2_0_valid; // @[Xbar.scala:352:24] wire winner_2 = _winner_T_2; // @[Arbiter.scala:71:{27,69}] wire _winner_T_3 = readys_3 & 
portsAOI_filtered_3_0_valid; // @[Xbar.scala:352:24] wire winner_3 = _winner_T_3; // @[Arbiter.scala:71:{27,69}] wire prefixOR_1 = winner_0; // @[Arbiter.scala:71:27, :76:48] wire prefixOR_2 = prefixOR_1 | winner_1; // @[Arbiter.scala:71:27, :76:48] wire prefixOR_3 = prefixOR_2 | winner_2; // @[Arbiter.scala:71:27, :76:48] wire _prefixOR_T = prefixOR_3 | winner_3; // @[Arbiter.scala:71:27, :76:48] wire _out_0_a_valid_T = portsAOI_filtered_0_valid | portsAOI_filtered_1_0_valid; // @[Xbar.scala:352:24]
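The wires above (readys_*, readys_mask, winner_*) unroll the rotating-priority arbitration that rocket-chip's Arbiter.scala performs on the crossbar's A channel. As a rough aid, the following is a minimal, self-contained Chisel sketch of the same round-robin idea; the module name, ports, and one-hot bookkeeping are invented for illustration and this is not the logic that produced the generated Verilog above.

import chisel3._
import chisel3.util._

// Sketch only: rotating-priority (round-robin) grant selection.
class RoundRobinSketch(n: Int) extends Module {
  val io = IO(new Bundle {
    val req   = Input(UInt(n.W))   // one request bit per input port
    val fire  = Input(Bool())      // the granted beat is consumed this cycle
    val grant = Output(UInt(n.W))  // one-hot grant (zero when nothing requests)
  })
  val lastGrant = RegInit(0.U(n.W))  // one-hot winner of the previous round
  // Requests strictly above the previous winner get first pick; the wider shift
  // makes priority wrap back to index 0 after the top index wins.
  val above   = ~(((lastGrant << 1) - 1.U)(n - 1, 0))
  val reqHigh = io.req & above
  val pick    = Mux(reqHigh.orR, PriorityEncoderOH(reqHigh), PriorityEncoderOH(io.req))
  io.grant := Mux(io.req.orR, pick, 0.U)
  when(io.fire && io.req.orR) { lastGrant := io.grant }
}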
Generate the Verilog code corresponding to the following Chisel files. File Crossing.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.interrupts import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.util.{SynchronizerShiftReg, AsyncResetReg} @deprecated("IntXing does not ensure interrupt source is glitch free. Use IntSyncSource and IntSyncSink", "rocket-chip 1.2") class IntXing(sync: Int = 3)(implicit p: Parameters) extends LazyModule { val intnode = IntAdapterNode() lazy val module = new Impl class Impl extends LazyModuleImp(this) { (intnode.in zip intnode.out) foreach { case ((in, _), (out, _)) => out := SynchronizerShiftReg(in, sync) } } } object IntSyncCrossingSource { def apply(alreadyRegistered: Boolean = false)(implicit p: Parameters) = { val intsource = LazyModule(new IntSyncCrossingSource(alreadyRegistered)) intsource.node } } class IntSyncCrossingSource(alreadyRegistered: Boolean = false)(implicit p: Parameters) extends LazyModule { val node = IntSyncSourceNode(alreadyRegistered) lazy val module = if (alreadyRegistered) (new ImplRegistered) else (new Impl) class Impl extends LazyModuleImp(this) { def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0) override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out.sync := AsyncResetReg(Cat(in.reverse)).asBools } } class ImplRegistered extends LazyRawModuleImp(this) { def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0) override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}_Registered" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out.sync := in } } } object IntSyncCrossingSink { @deprecated("IntSyncCrossingSink which used the `sync` parameter to determine crossing type is deprecated. 
Use IntSyncAsyncCrossingSink, IntSyncRationalCrossingSink, or IntSyncSyncCrossingSink instead for > 1, 1, and 0 sync values respectively", "rocket-chip 1.2") def apply(sync: Int = 3)(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync)) intsink.node } } class IntSyncAsyncCrossingSink(sync: Int = 3)(implicit p: Parameters) extends LazyModule { val node = IntSyncSinkNode(sync) lazy val module = new Impl class Impl extends LazyModuleImp(this) { override def desiredName = s"IntSyncAsyncCrossingSink_n${node.out.size}x${node.out.head._1.size}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out := SynchronizerShiftReg(in.sync, sync) } } } object IntSyncAsyncCrossingSink { def apply(sync: Int = 3)(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync)) intsink.node } } class IntSyncSyncCrossingSink()(implicit p: Parameters) extends LazyModule { val node = IntSyncSinkNode(0) lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { def outSize = node.out.headOption.map(_._1.size).getOrElse(0) override def desiredName = s"IntSyncSyncCrossingSink_n${node.out.size}x${outSize}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out := in.sync } } } object IntSyncSyncCrossingSink { def apply()(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncSyncCrossingSink()) intsink.node } } class IntSyncRationalCrossingSink()(implicit p: Parameters) extends LazyModule { val node = IntSyncSinkNode(1) lazy val module = new Impl class Impl extends LazyModuleImp(this) { def outSize = node.out.headOption.map(_._1.size).getOrElse(0) override def desiredName = s"IntSyncRationalCrossingSink_n${node.out.size}x${outSize}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out := RegNext(in.sync) } } } object IntSyncRationalCrossingSink { def apply()(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncRationalCrossingSink()) intsink.node } } File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. 
*/ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. 
* * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. */ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File MixedNode.scala: package org.chipsalliance.diplomacy.nodes import chisel3.{Data, DontCare, Wire} import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.{Field, Parameters} import org.chipsalliance.diplomacy.ValName import org.chipsalliance.diplomacy.sourceLine /** One side metadata of a [[Dangle]]. * * Describes one side of an edge going into or out of a [[BaseNode]]. * * @param serial * the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to. * @param index * the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to. */ case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] { import scala.math.Ordered.orderingToOrdered def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that)) } /** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]] * connects. * * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] , * [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]]. * * @param source * the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within * that [[BaseNode]]. * @param sink * sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that * [[BaseNode]]. * @param flipped * flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to * `danglesIn`. * @param dataOpt * actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module */ case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) { def data = dataOpt.get } /** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often * derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually * implement the protocol. 
*/ case class Edges[EI, EO](in: Seq[EI], out: Seq[EO]) /** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */ case object MonitorsEnabled extends Field[Boolean](true) /** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented. * * For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but * [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink * nodes, flipping the rendering of one node's edge will usual produce a more concise visual layout for the * [[LazyModule]]. */ case object RenderFlipped extends Field[Boolean](false) /** The sealed node class in the package, all node are derived from it. * * @param inner * Sink interface implementation. * @param outer * Source interface implementation. * @param valName * val name of this node. * @tparam DI * Downward-flowing parameters received on the inner side of the node. It is usually a brunch of parameters * describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected * [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port * parameters. * @tparam UI * Upward-flowing parameters generated by the inner side of the node. It is usually a brunch of parameters describing * the protocol parameters of a sink. For an [[InwardNode]], it is determined itself. * @tparam EI * Edge Parameters describing a connection on the inner side of the node. It is usually a brunch of transfers * specified for a sink according to protocol. * @tparam BI * Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface. * It should extends from [[chisel3.Data]], which represents the real hardware. * @tparam DO * Downward-flowing parameters generated on the outer side of the node. It is usually a brunch of parameters * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined itself. * @tparam UO * Upward-flowing parameters received by the outer side of the node. It is usually a brunch of parameters describing * the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]]. * Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters. * @tparam EO * Edge Parameters describing a connection on the outer side of the node. It is usually a brunch of transfers * specified for a source according to protocol. * @tparam BO * Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source * interface. It should extends from [[chisel3.Data]], which represents the real hardware. 
* * @note * Call Graph of [[MixedNode]] * - line `─`: source is process by a function and generate pass to others * - Arrow `→`: target of arrow is generated by source * * {{{ * (from the other node) * ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐ * ↓ │ * (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │ * [[InwardNode.accPI]] │ │ │ * │ │ (based on protocol) │ * │ │ [[MixedNode.inner.edgeI]] │ * │ │ ↓ │ * ↓ │ │ │ * (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │ * [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │ * │ │ ↑ │ │ │ * │ │ │ [[OutwardNode.doParams]] │ │ * │ │ │ (from the other node) │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * │ │ │ └────────┬──────────────┤ │ * │ │ │ │ │ │ * │ │ │ │ (based on protocol) │ * │ │ │ │ [[MixedNode.inner.edgeI]] │ * │ │ │ │ │ │ * │ │ (from the other node) │ ↓ │ * │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │ * │ ↑ ↑ │ │ ↓ │ * │ │ │ │ │ [[MixedNode.in]] │ * │ │ │ │ ↓ ↑ │ * │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │ * ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │ * │ │ │ [[MixedNode.bundleOut]]─┐ │ │ * │ │ │ ↑ ↓ │ │ * │ │ │ │ [[MixedNode.out]] │ │ * │ ↓ ↓ │ ↑ │ │ * │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │ * │ │ (from the other node) ↑ │ │ * │ │ │ │ │ │ * │ │ │ [[MixedNode.outer.edgeO]] │ │ * │ │ │ (based on protocol) │ │ * │ │ │ │ │ │ * │ │ │ ┌────────────────────────────────────────┤ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * (immobilize after elaboration)│ ↓ │ │ │ │ * [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │ * ↑ (inward port from [[OutwardNode]]) │ │ │ │ * │ ┌─────────────────────────────────────────┤ │ │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * [[OutwardNode.accPO]] │ ↓ │ │ │ * (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │ * │ ↑ │ │ * │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │ * └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘ * }}} */ abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data]( val inner: InwardNodeImp[DI, UI, EI, BI], val outer: OutwardNodeImp[DO, UO, EO, BO] )( implicit valName: ValName) extends BaseNode with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO] with InwardNode[DI, UI, BI] with OutwardNode[DO, UO, BO] { // Generate a [[NodeHandle]] with inward and outward node are both this node. val inward = this val outward = this /** Debug info of nodes binding. */ def bindingInfo: String = s"""$iBindingInfo |$oBindingInfo |""".stripMargin /** Debug info of ports connecting. */ def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}] |${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}] |""".stripMargin /** Debug info of parameters propagations. 
*/ def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}] |${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}] |${diParams.size} downstream inward parameters: [${diParams.mkString(",")}] |${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}] |""".stripMargin /** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and * [[MixedNode.iPortMapping]]. * * Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward * stars and outward stars. * * This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type * of node. * * @param iKnown * Number of known-size ([[BIND_ONCE]]) input bindings. * @param oKnown * Number of known-size ([[BIND_ONCE]]) output bindings. * @param iStar * Number of unknown size ([[BIND_STAR]]) input bindings. * @param oStar * Number of unknown size ([[BIND_STAR]]) output bindings. * @return * A Tuple of the resolved number of input and output connections. */ protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) /** Function to generate downward-flowing outward params from the downward-flowing input params and the current output * ports. * * @param n * The size of the output sequence to generate. * @param p * Sequence of downward-flowing input parameters of this node. * @return * A `n`-sized sequence of downward-flowing output edge parameters. */ protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]]. * * @param n * Size of the output sequence. * @param p * Upward-flowing output edge parameters. * @return * A n-sized sequence of upward-flowing input edge parameters. */ protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] /** @return * The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with * [[BIND_STAR]]. */ protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR) /** @return * The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of * output bindings bound with [[BIND_STAR]]. */ protected[diplomacy] lazy val sourceCard: Int = iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR) /** @return list of nodes involved in flex bindings with this node. */ protected[diplomacy] lazy val flexes: Seq[BaseNode] = oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2) /** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin * greedily taking up the remaining connections. * * @return * A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return * value is not relevant. */ protected[diplomacy] lazy val flexOffset: Int = { /** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex * operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a * connection that is not a flex and can set the direction for us. 
Otherwise, recurse by visiting the `flexes` of * each node in the current set and decide whether they should be added to the set or not. * * @return * the mapping of [[BaseNode]] indexed by their serial numbers. */ def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = { if (visited.contains(v.serial) || !v.flexibleArityDirection) { visited } else { v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum)) } } /** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node. * * @example * {{{ * a :*=* b :*=* c * d :*=* b * e :*=* f * }}} * * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)` */ val flexSet = DFS(this, Map()).values /** The total number of :*= operators where we're on the left. */ val allSink = flexSet.map(_.sinkCard).sum /** The total number of :=* operators used when we're on the right. */ val allSource = flexSet.map(_.sourceCard).sum require( allSink == 0 || allSource == 0, s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction." ) allSink - allSource } /** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */ protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = { if (flexibleArityDirection) flexOffset else if (n.flexibleArityDirection) n.flexOffset else 0 } /** For a node which is connected between two nodes, select the one that will influence the direction of the flex * resolution. */ protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = { val dir = edgeArityDirection(n) if (dir < 0) l else if (dir > 0) r else 1 } /** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */ private var starCycleGuard = false /** Resolve all the star operators into concrete indicies. As connections are being made, some may be "star" * connections which need to be resolved. In some way to determine how many actual edges they correspond to. We also * need to build up the ranges of edges which correspond to each binding operator, so that We can apply the correct * edge parameters and later build up correct bundle connections. * * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding * operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort * (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= * bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N` */ protected[diplomacy] lazy val ( oPortMapping: Seq[(Int, Int)], iPortMapping: Seq[(Int, Int)], oStar: Int, iStar: Int ) = { try { if (starCycleGuard) throw StarCycleException() starCycleGuard = true // For a given node N... 
// Number of foo :=* N // + Number of bar :=* foo :*=* N val oStars = oBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0) } // Number of N :*= foo // + Number of N :*=* foo :*= bar val iStars = iBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0) } // 1 for foo := N // + bar.iStar for bar :*= foo :*=* N // + foo.iStar for foo :*= N // + 0 for foo :=* N val oKnown = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, 0, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => 0 } }.sum // 1 for N := foo // + bar.oStar for N :*=* foo :=* bar // + foo.oStar for N :=* foo // + 0 for N :*= foo val iKnown = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, 0) case BIND_QUERY => n.oStar case BIND_STAR => 0 } }.sum // Resolve star depends on the node subclass to implement the algorithm for this. val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars) // Cumulative list of resolved outward binding range starting points val oSum = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => oStar } }.scanLeft(0)(_ + _) // Cumulative list of resolved inward binding range starting points val iSum = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar) case BIND_QUERY => n.oStar case BIND_STAR => iStar } }.scanLeft(0)(_ + _) // Create ranges for each binding based on the running sums and return // those along with resolved values for the star operations. (oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar) } catch { case c: StarCycleException => throw c.copy(loop = context +: c.loop) } } /** Sequence of inward ports. * * This should be called after all star bindings are resolved. * * Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. * `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this * connection was made in the source code. */ protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oBindings.flatMap { case (i, n, _, p, s) => // for each binding operator in this node, look at what it connects to val (start, end) = n.iPortMapping(i) (start until end).map { j => (j, n, p, s) } } /** Sequence of outward ports. * * This should be called after all star bindings are resolved. * * `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of * outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection * was made in the source code. */ protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iBindings.flatMap { case (i, n, _, p, s) => // query this port index range of this node in the other side of node. 
val (start, end) = n.oPortMapping(i) (start until end).map { j => (j, n, p, s) } } // Ephemeral nodes ( which have non-None iForward/oForward) have in_degree = out_degree // Thus, there must exist an Eulerian path and the below algorithms terminate @scala.annotation.tailrec private def oTrace( tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) ): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.iForward(i) match { case None => (i, n, p, s) case Some((j, m)) => oTrace((j, m, p, s)) } } @scala.annotation.tailrec private def iTrace( tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) ): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.oForward(i) match { case None => (i, n, p, s) case Some((j, m)) => iTrace((j, m, p, s)) } } /** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - Numeric index of this binding in the [[InwardNode]] on the other end. * - [[InwardNode]] on the other end of this binding. * - A view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace) /** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - numeric index of this binding in [[OutwardNode]] on the other end. * - [[OutwardNode]] on the other end of this binding. * - a view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace) private var oParamsCycleGuard = false protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) } protected[diplomacy] lazy val doParams: Seq[DO] = { try { if (oParamsCycleGuard) throw DownwardCycleException() oParamsCycleGuard = true val o = mapParamsD(oPorts.size, diParams) require( o.size == oPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of outward ports should equal the number of produced outward parameters. |$context |$connectedPortsInfo |Downstreamed inward parameters: [${diParams.mkString(",")}] |Produced outward parameters: [${o.mkString(",")}] |""".stripMargin ) o.map(outer.mixO(_, this)) } catch { case c: DownwardCycleException => throw c.copy(loop = context +: c.loop) } } private var iParamsCycleGuard = false protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) } protected[diplomacy] lazy val uiParams: Seq[UI] = { try { if (iParamsCycleGuard) throw UpwardCycleException() iParamsCycleGuard = true val i = mapParamsU(iPorts.size, uoParams) require( i.size == iPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of inward ports should equal the number of produced inward parameters. |$context |$connectedPortsInfo |Upstreamed outward parameters: [${uoParams.mkString(",")}] |Produced inward parameters: [${i.mkString(",")}] |""".stripMargin ) i.map(inner.mixI(_, this)) } catch { case c: UpwardCycleException => throw c.copy(loop = context +: c.loop) } } /** Outward edge parameters. 
*/ protected[diplomacy] lazy val edgesOut: Seq[EO] = (oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) } /** Inward edge parameters. */ protected[diplomacy] lazy val edgesIn: Seq[EI] = (iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) } /** A tuple of the input edge parameters and output edge parameters for the edges bound to this node. * * If you need to access to the edges of a foreign Node, use this method (in/out create bundles). */ lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut) /** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */ protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e => val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } /** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */ protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e => val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(serial, i), sink = HalfEdge(n.serial, j), flipped = false, name = wirePrefix + "out", dataOpt = None ) } private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(n.serial, j), sink = HalfEdge(serial, i), flipped = true, name = wirePrefix + "in", dataOpt = None ) } /** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */ protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleOut(i))) } /** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */ protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleIn(i))) } private[diplomacy] var instantiated = false /** Gather Bundle and edge parameters of outward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def out: Seq[(BO, EO)] = { require( instantiated, s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleOut.zip(edgesOut) } /** Gather Bundle and edge parameters of inward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def in: Seq[(BI, EI)] = { require( instantiated, s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleIn.zip(edgesIn) } /** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires, * instantiate monitors on all input ports if appropriate, and return all the dangles of this node. 
*/ protected[diplomacy] def instantiate(): Seq[Dangle] = { instantiated = true if (!circuitIdentity) { (iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) } } danglesOut ++ danglesIn } protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn /** Connects the outward part of a node with the inward part of this node. */ protected[diplomacy] def bind( h: OutwardNode[DI, UI, BI], binding: NodeBinding )( implicit p: Parameters, sourceInfo: SourceInfo ): Unit = { val x = this // x := y val y = h sourceLine(sourceInfo, " at ", "") val i = x.iPushed val o = y.oPushed y.oPush( i, x, binding match { case BIND_ONCE => BIND_ONCE case BIND_FLEX => BIND_FLEX case BIND_STAR => BIND_QUERY case BIND_QUERY => BIND_STAR } ) x.iPush(o, y, binding) } /* Metadata for printing the node graph. */ def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) => val re = inner.render(e) (n, re.copy(flipped = re.flipped != p(RenderFlipped))) } /** Metadata for printing the node graph */ def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) } }
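A minimal plain-Scala sketch of the port-forwarding resolution performed by oTrace/iTrace above: starting from a direct port, keep hopping across ephemeral forwards until a node with no forward entry is reached. SimpleNode and its forward map are illustrative stand-ins for this note only, not the diplomacy API.

object ForwardResolutionSketch {
  // forward maps a local port index to (port index, node) on an ephemeral hop;
  // a concrete endpoint has no entry for that index.
  final case class SimpleNode(name: String, forward: Map[Int, (Int, SimpleNode)] = Map.empty)

  @scala.annotation.tailrec
  def trace(idx: Int, node: SimpleNode): (Int, SimpleNode) =
    node.forward.get(idx) match {
      case None            => (idx, node)    // reached a real port
      case Some((j, next)) => trace(j, next) // hop across the ephemeral node
    }

  def main(args: Array[String]): Unit = {
    val sink      = SimpleNode("sink")
    val ephemeral = SimpleNode("ephemeral", Map(0 -> (2, sink)))
    println(trace(0, ephemeral)) // prints (2,SimpleNode(sink,Map()))
  }
}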
module IntSyncSyncCrossingSink_n1x2_1( // @[Crossing.scala:96:9] input auto_in_sync_0, // @[LazyModuleImp.scala:107:25] input auto_in_sync_1, // @[LazyModuleImp.scala:107:25] output auto_out_0, // @[LazyModuleImp.scala:107:25] output auto_out_1 // @[LazyModuleImp.scala:107:25] ); wire auto_in_sync_0_0 = auto_in_sync_0; // @[Crossing.scala:96:9] wire auto_in_sync_1_0 = auto_in_sync_1; // @[Crossing.scala:96:9] wire childClock = 1'h0; // @[LazyModuleImp.scala:155:31] wire childReset = 1'h0; // @[LazyModuleImp.scala:158:31] wire _childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25] wire nodeIn_sync_0 = auto_in_sync_0_0; // @[Crossing.scala:96:9] wire nodeIn_sync_1 = auto_in_sync_1_0; // @[Crossing.scala:96:9] wire nodeOut_0; // @[MixedNode.scala:542:17] wire nodeOut_1; // @[MixedNode.scala:542:17] wire auto_out_0_0; // @[Crossing.scala:96:9] wire auto_out_1_0; // @[Crossing.scala:96:9] assign nodeOut_0 = nodeIn_sync_0; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_1 = nodeIn_sync_1; // @[MixedNode.scala:542:17, :551:17] assign auto_out_0_0 = nodeOut_0; // @[Crossing.scala:96:9] assign auto_out_1_0 = nodeOut_1; // @[Crossing.scala:96:9] assign auto_out_0 = auto_out_0_0; // @[Crossing.scala:96:9] assign auto_out_1 = auto_out_1_0; // @[Crossing.scala:96:9] endmodule
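The generated sink above holds no state: each auto_out_N is a direct copy of the corresponding auto_in_sync_N, and the childClock/childReset wires are constant and unused. As a hedged illustration only (the module and port names below are made up, not the rocket-chip IntSyncSyncCrossingSink source), the same behaviour in Chisel is a plain vector passthrough:

import chisel3._

// Illustrative wire-through module mirroring the generated Verilog above.
class SyncPassthroughSketch(n: Int = 2) extends RawModule {
  val in_sync = IO(Input(Vec(n, Bool())))
  val out     = IO(Output(Vec(n, Bool())))
  out := in_sync
}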
Generate the Verilog code corresponding to the following Chisel files. File util.scala: //****************************************************************************** // Copyright (c) 2015 - 2019, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // Utility Functions //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ package boom.v3.util import chisel3._ import chisel3.util._ import freechips.rocketchip.rocket.Instructions._ import freechips.rocketchip.rocket._ import freechips.rocketchip.util.{Str} import org.chipsalliance.cde.config.{Parameters} import freechips.rocketchip.tile.{TileKey} import boom.v3.common.{MicroOp} import boom.v3.exu.{BrUpdateInfo} /** * Object to XOR fold a input register of fullLength into a compressedLength. */ object Fold { def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = { val clen = compressedLength val hlen = fullLength if (hlen <= clen) { input } else { var res = 0.U(clen.W) var remaining = input.asUInt for (i <- 0 to hlen-1 by clen) { val len = if (i + clen > hlen ) (hlen - i) else clen require(len > 0) res = res(clen-1,0) ^ remaining(len-1,0) remaining = remaining >> len.U } res } } } /** * Object to check if MicroOp was killed due to a branch mispredict. * Uses "Fast" branch masks */ object IsKilledByBranch { def apply(brupdate: BrUpdateInfo, uop: MicroOp): Bool = { return maskMatch(brupdate.b1.mispredict_mask, uop.br_mask) } def apply(brupdate: BrUpdateInfo, uop_mask: UInt): Bool = { return maskMatch(brupdate.b1.mispredict_mask, uop_mask) } } /** * Object to return new MicroOp with a new BR mask given a MicroOp mask * and old BR mask. */ object GetNewUopAndBrMask { def apply(uop: MicroOp, brupdate: BrUpdateInfo) (implicit p: Parameters): MicroOp = { val newuop = WireInit(uop) newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask newuop } } /** * Object to return a BR mask given a MicroOp mask and old BR mask. 
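* e.g. br_mask = 0b0110 with resolve_mask = 0b0010 returns 0b0100: the resolved branch's bit is cleared.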
*/ object GetNewBrMask { def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt = { return uop.br_mask & ~brupdate.b1.resolve_mask } def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt = { return br_mask & ~brupdate.b1.resolve_mask } } object UpdateBrMask { def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = { val out = WireInit(uop) out.br_mask := GetNewBrMask(brupdate, uop) out } def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = { val out = WireInit(bundle) out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask) out } def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: Valid[T]): Valid[T] = { val out = WireInit(bundle) out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask) out.valid := bundle.valid && !IsKilledByBranch(brupdate, bundle.bits.uop.br_mask) out } } /** * Object to check if at least 1 bit matches in two masks */ object maskMatch { def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U } /** * Object to clear one bit in a mask given an index */ object clearMaskBit { def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0) } /** * Object to shift a register over by one bit and concat a new one */ object PerformShiftRegister { def apply(reg_val: UInt, new_bit: Bool): UInt = { reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt reg_val } } /** * Object to shift a register over by one bit, wrapping the top bit around to the bottom * (XOR'ed with a new-bit), and evicting a bit at index HLEN. * This is used to simulate a longer HLEN-width shift register that is folded * down to a compressed CLEN. */ object PerformCircularShiftRegister { def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = { val carry = csr(clen-1) val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U) newval } } /** * Object to increment an input value, wrapping it if * necessary. */ object WrapAdd { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, amt: UInt, n: Int): UInt = { if (isPow2(n)) { (value + amt)(log2Ceil(n)-1,0) } else { val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt) Mux(sum >= n.U, sum - n.U, sum) } } } /** * Object to decrement an input value, wrapping it if * necessary. */ object WrapSub { // "n" is the number of increments, so we wrap to n-1. def apply(value: UInt, amt: Int, n: Int): UInt = { if (isPow2(n)) { (value - amt.U)(log2Ceil(n)-1,0) } else { val v = Cat(0.U(1.W), value) val b = Cat(0.U(1.W), amt.U) Mux(value >= amt.U, value - amt.U, n.U - amt.U + value) } } } /** * Object to increment an input value, wrapping it if * necessary. */ object WrapInc { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, n: Int): UInt = { if (isPow2(n)) { (value + 1.U)(log2Ceil(n)-1,0) } else { val wrap = (value === (n-1).U) Mux(wrap, 0.U, value + 1.U) } } } /** * Object to decrement an input value, wrapping it if * necessary. */ object WrapDec { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, n: Int): UInt = { if (isPow2(n)) { (value - 1.U)(log2Ceil(n)-1,0) } else { val wrap = (value === 0.U) Mux(wrap, (n-1).U, value - 1.U) } } } /** * Object to mask off lower bits of a PC to align to a "b" * Byte boundary. */ object AlignPCToBoundary { def apply(pc: UInt, b: Int): UInt = { // Invert for scenario where pc longer than b // (which would clear all bits above size(b)). 
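// Worked example: pc = 0x1E with b = 8 gives ~(~0x1E | 0x7) = 0x18, i.e. pc rounded down to an 8-byte boundary with the upper bits preserved.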
~(~pc | (b-1).U) } } /** * Object to rotate a signal left by one */ object RotateL1 { def apply(signal: UInt): UInt = { val w = signal.getWidth val out = Cat(signal(w-2,0), signal(w-1)) return out } } /** * Object to sext a value to a particular length. */ object Sext { def apply(x: UInt, length: Int): UInt = { if (x.getWidth == length) return x else return Cat(Fill(length-x.getWidth, x(x.getWidth-1)), x) } } /** * Object to translate from BOOM's special "packed immediate" to a 32b signed immediate * Asking for U-type gives it shifted up 12 bits. */ object ImmGen { import boom.v3.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U} def apply(ip: UInt, isel: UInt): SInt = { val sign = ip(LONGEST_IMM_SZ-1).asSInt val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign) val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign) val i11 = Mux(isel === IS_U, 0.S, Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign)) val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt) val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt) val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S) return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0).asSInt } } /** * Object to get the FP rounding mode out of a packed immediate. */ object ImmGenRm { def apply(ip: UInt): UInt = { return ip(2,0) } } /** * Object to get the FP function fype from a packed immediate. * Note: only works if !(IS_B or IS_S) */ object ImmGenTyp { def apply(ip: UInt): UInt = { return ip(9,8) } } /** * Object to see if an instruction is a JALR. */ object DebugIsJALR { def apply(inst: UInt): Bool = { // TODO Chisel not sure why this won't compile // val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)), // Array( // JALR -> Bool(true))) inst(6,0) === "b1100111".U } } /** * Object to take an instruction and output its branch or jal target. Only used * for a debug assert (no where else would we jump straight from instruction * bits to a target). */ object DebugGetBJImm { def apply(inst: UInt): UInt = { // TODO Chisel not sure why this won't compile //val csignals = //rocket.DecodeLogic(inst, // List(Bool(false), Bool(false)), // Array( // BEQ -> List(Bool(true ), Bool(false)), // BNE -> List(Bool(true ), Bool(false)), // BGE -> List(Bool(true ), Bool(false)), // BGEU -> List(Bool(true ), Bool(false)), // BLT -> List(Bool(true ), Bool(false)), // BLTU -> List(Bool(true ), Bool(false)) // )) //val is_br :: nothing :: Nil = csignals val is_br = (inst(6,0) === "b1100011".U) val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W)) val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W)) Mux(is_br, br_targ, jal_targ) } } /** * Object to return the lowest bit position after the head. */ object AgePriorityEncoder { def apply(in: Seq[Bool], head: UInt): UInt = { val n = in.size val width = log2Ceil(in.size) val n_padded = 1 << width val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in val idx = PriorityEncoder(temp_vec) idx(width-1, 0) //discard msb } } /** * Object to determine whether queue * index i0 is older than index i1. */ object IsOlder { def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head)) } /** * Set all bits at or below the highest order '1'. */ object MaskLower { def apply(in: UInt) = { val n = in.getWidth (0 until n).map(i => in >> i.U).reduce(_|_) } } /** * Set all bits at or above the lowest order '1'. 
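* e.g. a 5-bit input 0b00100 produces 0b11100.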
*/ object MaskUpper { def apply(in: UInt) = { val n = in.getWidth (0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_) } } /** * Transpose a matrix of Chisel Vecs. */ object Transpose { def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = { val n = in(0).size VecInit((0 until n).map(i => VecInit(in.map(row => row(i))))) } } /** * N-wide one-hot priority encoder. */ object SelectFirstN { def apply(in: UInt, n: Int) = { val sels = Wire(Vec(n, UInt(in.getWidth.W))) var mask = in for (i <- 0 until n) { sels(i) := PriorityEncoderOH(mask) mask = mask & ~sels(i) } sels } } /** * Connect the first k of n valid input interfaces to k output interfaces. */ class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module { require(n >= k) val io = IO(new Bundle { val in = Vec(n, Flipped(DecoupledIO(gen))) val out = Vec(k, DecoupledIO(gen)) }) if (n == k) { io.out <> io.in } else { val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c)) val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col => (col zip io.in.map(_.valid)) map {case (c,v) => c && v}) val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_)) val out_valids = sels map (col => col.reduce(_||_)) val out_data = sels map (s => Mux1H(s, io.in.map(_.bits))) in_readys zip io.in foreach {case (r,i) => i.ready := r} out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d} } } /** * Create a queue that can be killed with a branch kill signal. * Assumption: enq.valid only high if not killed by branch (so don't check IsKilled on io.enq). */ class BranchKillableQueue[T <: boom.v3.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v3.common.MicroOp => Bool = u => true.B, flow: Boolean = true) (implicit p: org.chipsalliance.cde.config.Parameters) extends boom.v3.common.BoomModule()(p) with boom.v3.common.HasBoomCoreParameters { val io = IO(new Bundle { val enq = Flipped(Decoupled(gen)) val deq = Decoupled(gen) val brupdate = Input(new BrUpdateInfo()) val flush = Input(Bool()) val empty = Output(Bool()) val count = Output(UInt(log2Ceil(entries).W)) }) val ram = Mem(entries, gen) val valids = RegInit(VecInit(Seq.fill(entries) {false.B})) val uops = Reg(Vec(entries, new MicroOp)) val enq_ptr = Counter(entries) val deq_ptr = Counter(entries) val maybe_full = RegInit(false.B) val ptr_match = enq_ptr.value === deq_ptr.value io.empty := ptr_match && !maybe_full val full = ptr_match && maybe_full val do_enq = WireInit(io.enq.fire) val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty) for (i <- 0 until entries) { val mask = uops(i).br_mask val uop = uops(i) valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, mask) && !(io.flush && flush_fn(uop)) when (valids(i)) { uops(i).br_mask := GetNewBrMask(io.brupdate, mask) } } when (do_enq) { ram(enq_ptr.value) := io.enq.bits valids(enq_ptr.value) := true.B //!IsKilledByBranch(io.brupdate, io.enq.bits.uop) uops(enq_ptr.value) := io.enq.bits.uop uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop) enq_ptr.inc() } when (do_deq) { valids(deq_ptr.value) := false.B deq_ptr.inc() } when (do_enq =/= do_deq) { maybe_full := do_enq } io.enq.ready := !full val out = Wire(gen) out := ram(deq_ptr.value) out.uop := uops(deq_ptr.value) io.deq.valid := !io.empty && valids(deq_ptr.value) && !IsKilledByBranch(io.brupdate, out.uop) && !(io.flush && flush_fn(out.uop)) io.deq.bits := out io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, 
out.uop) // For flow queue behavior. if (flow) { when (io.empty) { io.deq.valid := io.enq.valid //&& !IsKilledByBranch(io.brupdate, io.enq.bits.uop) io.deq.bits := io.enq.bits io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop) do_deq := false.B when (io.deq.ready) { do_enq := false.B } } } private val ptr_diff = enq_ptr.value - deq_ptr.value if (isPow2(entries)) { io.count := Cat(maybe_full && ptr_match, ptr_diff) } else { io.count := Mux(ptr_match, Mux(maybe_full, entries.asUInt, 0.U), Mux(deq_ptr.value > enq_ptr.value, entries.asUInt + ptr_diff, ptr_diff)) } } // ------------------------------------------ // Printf helper functions // ------------------------------------------ object BoolToChar { /** * Take in a Chisel Bool and convert it into a Str * based on the Chars given * * @param c_bool Chisel Bool * @param trueChar Scala Char if bool is true * @param falseChar Scala Char if bool is false * @return UInt ASCII Char for "trueChar" or "falseChar" */ def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = { Mux(c_bool, Str(trueChar), Str(falseChar)) } } object CfiTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param cfi_type specific cfi type * @return Vec of Strs (must be indexed to get specific char) */ def apply(cfi_type: UInt) = { val strings = Seq("----", "BR ", "JAL ", "JALR") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(cfi_type) } } object BpdTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param bpd_type specific bpd type * @return Vec of Strs (must be indexed to get specific char) */ def apply(bpd_type: UInt) = { val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(bpd_type) } } object RobTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param rob_type specific rob type * @return Vec of Strs (must be indexed to get specific char) */ def apply(rob_type: UInt) = { val strings = Seq("RST", "NML", "RBK", " WT") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(rob_type) } } object XRegToChars { /** * Get a Vec of Strs that can be used for printing * * @param xreg specific register number * @return Vec of Strs (must be indexed to get specific char) */ def apply(xreg: UInt) = { val strings = Seq(" x0", " ra", " sp", " gp", " tp", " t0", " t1", " t2", " s0", " s1", " a0", " a1", " a2", " a3", " a4", " a5", " a6", " a7", " s2", " s3", " s4", " s5", " s6", " s7", " s8", " s9", "s10", "s11", " t3", " t4", " t5", " t6") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(xreg) } } object FPRegToChars { /** * Get a Vec of Strs that can be used for printing * * @param fpreg specific register number * @return Vec of Strs (must be indexed to get specific char) */ def apply(fpreg: UInt) = { val strings = Seq(" ft0", " ft1", " ft2", " ft3", " ft4", " ft5", " ft6", " ft7", " fs0", " fs1", " fa0", " fa1", " fa2", " fa3", " fa4", " fa5", " fa6", " fa7", " fs2", " fs3", " fs4", " fs5", " fs6", " fs7", " fs8", " fs9", "fs10", "fs11", " ft8", " ft9", "ft10", "ft11") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(fpreg) } } object BoomCoreStringPrefix { /** * Add prefix to BOOM strings (currently only adds the hartId) 
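* e.g. on tile 0, apply("Rename Stage") returns "[C0] Rename Stage\n".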
* * @param strs list of strings * @return String combining the list with the prefix per line */ def apply(strs: String*)(implicit p: Parameters) = { val prefix = "[C" + s"${p(TileKey).tileId}" + "] " strs.map(str => prefix + str + "\n").mkString("") } } File rename-stage.scala: //****************************************************************************** // Copyright (c) 2012 - 2019, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // RISCV Processor Datapath: Rename Logic //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // // Supports 1-cycle and 2-cycle latencies. (aka, passthrough versus registers between ren1 and ren2). // - ren1: read the map tables and allocate a new physical register from the freelist. // - ren2: read the busy table for the physical operands. // // Ren1 data is provided as an output to be fed directly into the ROB. package boom.v3.exu import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.Parameters import boom.v3.common._ import boom.v3.util._ /** * IO bundle to interface with the Register Rename logic * * @param plWidth pipeline width * @param numIntPregs number of int physical registers * @param numFpPregs number of FP physical registers * @param numWbPorts number of int writeback ports * @param numWbPorts number of FP writeback ports */ class RenameStageIO( val plWidth: Int, val numPhysRegs: Int, val numWbPorts: Int) (implicit p: Parameters) extends BoomBundle /** * IO bundle to debug the rename stage */ class DebugRenameStageIO(val numPhysRegs: Int)(implicit p: Parameters) extends BoomBundle { val freelist = Bits(numPhysRegs.W) val isprlist = Bits(numPhysRegs.W) val busytable = UInt(numPhysRegs.W) } abstract class AbstractRenameStage( plWidth: Int, numPhysRegs: Int, numWbPorts: Int) (implicit p: Parameters) extends BoomModule { val io = IO(new Bundle { val ren_stalls = Output(Vec(plWidth, Bool())) val kill = Input(Bool()) val dec_fire = Input(Vec(plWidth, Bool())) // will commit state updates val dec_uops = Input(Vec(plWidth, new MicroOp())) // physical specifiers available AND busy/ready status available. 
val ren2_mask = Vec(plWidth, Output(Bool())) // mask of valid instructions val ren2_uops = Vec(plWidth, Output(new MicroOp())) // branch resolution (execute) val brupdate = Input(new BrUpdateInfo()) val dis_fire = Input(Vec(coreWidth, Bool())) val dis_ready = Input(Bool()) // wakeup ports val wakeups = Flipped(Vec(numWbPorts, Valid(new ExeUnitResp(xLen)))) // commit stage val com_valids = Input(Vec(plWidth, Bool())) val com_uops = Input(Vec(plWidth, new MicroOp())) val rbk_valids = Input(Vec(plWidth, Bool())) val rollback = Input(Bool()) val debug_rob_empty = Input(Bool()) val debug = Output(new DebugRenameStageIO(numPhysRegs)) }) io.ren_stalls.foreach(_ := false.B) io.debug := DontCare def BypassAllocations(uop: MicroOp, older_uops: Seq[MicroOp], alloc_reqs: Seq[Bool]): MicroOp //------------------------------------------------------------- // Pipeline State & Wires // Stage 1 val ren1_fire = Wire(Vec(plWidth, Bool())) val ren1_uops = Wire(Vec(plWidth, new MicroOp)) // Stage 2 val ren2_fire = io.dis_fire val ren2_ready = io.dis_ready val ren2_valids = Wire(Vec(plWidth, Bool())) val ren2_uops = Wire(Vec(plWidth, new MicroOp)) val ren2_alloc_reqs = Wire(Vec(plWidth, Bool())) //------------------------------------------------------------- // pipeline registers for (w <- 0 until plWidth) { ren1_fire(w) := io.dec_fire(w) ren1_uops(w) := io.dec_uops(w) } for (w <- 0 until plWidth) { val r_valid = RegInit(false.B) val r_uop = Reg(new MicroOp) val next_uop = Wire(new MicroOp) next_uop := r_uop when (io.kill) { r_valid := false.B } .elsewhen (ren2_ready) { r_valid := ren1_fire(w) next_uop := ren1_uops(w) } .otherwise { r_valid := r_valid && !ren2_fire(w) // clear bit if uop gets dispatched next_uop := r_uop } r_uop := GetNewUopAndBrMask(BypassAllocations(next_uop, ren2_uops, ren2_alloc_reqs), io.brupdate) ren2_valids(w) := r_valid ren2_uops(w) := r_uop } //------------------------------------------------------------- // Outputs io.ren2_mask := ren2_valids } /** * Rename stage that connets the map table, free list, and busy table. * Can be used in both the FP pipeline and the normal execute pipeline. 
* * @param plWidth pipeline width * @param numWbPorts number of int writeback ports * @param numWbPorts number of FP writeback ports */ class RenameStage( plWidth: Int, numPhysRegs: Int, numWbPorts: Int, float: Boolean) (implicit p: Parameters) extends AbstractRenameStage(plWidth, numPhysRegs, numWbPorts)(p) { val pregSz = log2Ceil(numPhysRegs) val rtype = if (float) RT_FLT else RT_FIX //------------------------------------------------------------- // Helper Functions def BypassAllocations(uop: MicroOp, older_uops: Seq[MicroOp], alloc_reqs: Seq[Bool]): MicroOp = { val bypassed_uop = Wire(new MicroOp) bypassed_uop := uop val bypass_hits_rs1 = (older_uops zip alloc_reqs) map { case (r,a) => a && r.ldst === uop.lrs1 } val bypass_hits_rs2 = (older_uops zip alloc_reqs) map { case (r,a) => a && r.ldst === uop.lrs2 } val bypass_hits_rs3 = (older_uops zip alloc_reqs) map { case (r,a) => a && r.ldst === uop.lrs3 } val bypass_hits_dst = (older_uops zip alloc_reqs) map { case (r,a) => a && r.ldst === uop.ldst } val bypass_sel_rs1 = PriorityEncoderOH(bypass_hits_rs1.reverse).reverse val bypass_sel_rs2 = PriorityEncoderOH(bypass_hits_rs2.reverse).reverse val bypass_sel_rs3 = PriorityEncoderOH(bypass_hits_rs3.reverse).reverse val bypass_sel_dst = PriorityEncoderOH(bypass_hits_dst.reverse).reverse val do_bypass_rs1 = bypass_hits_rs1.reduce(_||_) val do_bypass_rs2 = bypass_hits_rs2.reduce(_||_) val do_bypass_rs3 = bypass_hits_rs3.reduce(_||_) val do_bypass_dst = bypass_hits_dst.reduce(_||_) val bypass_pdsts = older_uops.map(_.pdst) when (do_bypass_rs1) { bypassed_uop.prs1 := Mux1H(bypass_sel_rs1, bypass_pdsts) } when (do_bypass_rs2) { bypassed_uop.prs2 := Mux1H(bypass_sel_rs2, bypass_pdsts) } when (do_bypass_rs3) { bypassed_uop.prs3 := Mux1H(bypass_sel_rs3, bypass_pdsts) } when (do_bypass_dst) { bypassed_uop.stale_pdst := Mux1H(bypass_sel_dst, bypass_pdsts) } bypassed_uop.prs1_busy := uop.prs1_busy || do_bypass_rs1 bypassed_uop.prs2_busy := uop.prs2_busy || do_bypass_rs2 bypassed_uop.prs3_busy := uop.prs3_busy || do_bypass_rs3 if (!float) { bypassed_uop.prs3 := DontCare bypassed_uop.prs3_busy := false.B } bypassed_uop } //------------------------------------------------------------- // Rename Structures val maptable = Module(new RenameMapTable( plWidth, 32, numPhysRegs, false, float)) val freelist = Module(new RenameFreeList( plWidth, numPhysRegs, if (float) 32 else 31)) val busytable = Module(new RenameBusyTable( plWidth, numPhysRegs, numWbPorts, false, float)) val ren2_br_tags = Wire(Vec(plWidth, Valid(UInt(brTagSz.W)))) // Commit/Rollback val com_valids = Wire(Vec(plWidth, Bool())) val rbk_valids = Wire(Vec(plWidth, Bool())) for (w <- 0 until plWidth) { ren2_alloc_reqs(w) := ren2_uops(w).ldst_val && ren2_uops(w).dst_rtype === rtype && ren2_fire(w) ren2_br_tags(w).valid := ren2_fire(w) && ren2_uops(w).allocate_brtag com_valids(w) := io.com_uops(w).ldst_val && io.com_uops(w).dst_rtype === rtype && io.com_valids(w) rbk_valids(w) := io.com_uops(w).ldst_val && io.com_uops(w).dst_rtype === rtype && io.rbk_valids(w) ren2_br_tags(w).bits := ren2_uops(w).br_tag } //------------------------------------------------------------- // Rename Table // Maptable inputs. val map_reqs = Wire(Vec(plWidth, new MapReq(lregSz))) val remap_reqs = Wire(Vec(plWidth, new RemapReq(lregSz, pregSz))) // Generate maptable requests. 
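// Lookups use the ren1 logical specifiers; remap requests either install the ren2 uop's newly allocated pdst for its ldst or, on rollback, restore the committed uop's stale_pdst.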
for ((((ren1,ren2),com),w) <- (ren1_uops zip ren2_uops zip io.com_uops.reverse).zipWithIndex) { map_reqs(w).lrs1 := ren1.lrs1 map_reqs(w).lrs2 := ren1.lrs2 map_reqs(w).lrs3 := ren1.lrs3 map_reqs(w).ldst := ren1.ldst remap_reqs(w).ldst := Mux(io.rollback, com.ldst , ren2.ldst) remap_reqs(w).pdst := Mux(io.rollback, com.stale_pdst, ren2.pdst) } ren2_alloc_reqs zip rbk_valids.reverse zip remap_reqs map { case ((a,r),rr) => rr.valid := a || r} // Hook up inputs. maptable.io.map_reqs := map_reqs maptable.io.remap_reqs := remap_reqs maptable.io.ren_br_tags := ren2_br_tags maptable.io.brupdate := io.brupdate maptable.io.rollback := io.rollback // Maptable outputs. for ((uop, w) <- ren1_uops.zipWithIndex) { val mappings = maptable.io.map_resps(w) uop.prs1 := mappings.prs1 uop.prs2 := mappings.prs2 uop.prs3 := mappings.prs3 // only FP has 3rd operand uop.stale_pdst := mappings.stale_pdst } //------------------------------------------------------------- // Free List // Freelist inputs. freelist.io.reqs := ren2_alloc_reqs freelist.io.dealloc_pregs zip com_valids zip rbk_valids map {case ((d,c),r) => d.valid := c || r} freelist.io.dealloc_pregs zip io.com_uops map {case (d,c) => d.bits := Mux(io.rollback, c.pdst, c.stale_pdst)} freelist.io.ren_br_tags := ren2_br_tags freelist.io.brupdate := io.brupdate freelist.io.debug.pipeline_empty := io.debug_rob_empty assert (ren2_alloc_reqs zip freelist.io.alloc_pregs map {case (r,p) => !r || p.bits =/= 0.U} reduce (_&&_), "[rename-stage] A uop is trying to allocate the zero physical register.") // Freelist outputs. for ((uop, w) <- ren2_uops.zipWithIndex) { val preg = freelist.io.alloc_pregs(w).bits uop.pdst := Mux(uop.ldst =/= 0.U || float.B, preg, 0.U) } //------------------------------------------------------------- // Busy Table busytable.io.ren_uops := ren2_uops // expects pdst to be set up. busytable.io.rebusy_reqs := ren2_alloc_reqs busytable.io.wb_valids := io.wakeups.map(_.valid) busytable.io.wb_pdsts := io.wakeups.map(_.bits.uop.pdst) assert (!(io.wakeups.map(x => x.valid && x.bits.uop.dst_rtype =/= rtype).reduce(_||_)), "[rename] Wakeup has wrong rtype.") for ((uop, w) <- ren2_uops.zipWithIndex) { val busy = busytable.io.busy_resps(w) uop.prs1_busy := uop.lrs1_rtype === rtype && busy.prs1_busy uop.prs2_busy := uop.lrs2_rtype === rtype && busy.prs2_busy uop.prs3_busy := uop.frs3_en && busy.prs3_busy val valid = ren2_valids(w) assert (!(valid && busy.prs1_busy && rtype === RT_FIX && uop.lrs1 === 0.U), "[rename] x0 is busy??") assert (!(valid && busy.prs2_busy && rtype === RT_FIX && uop.lrs2 === 0.U), "[rename] x0 is busy??") } //------------------------------------------------------------- // Outputs for (w <- 0 until plWidth) { val can_allocate = freelist.io.alloc_pregs(w).valid // Push back against Decode stage if Rename1 can't proceed. 
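// A slot stalls when its uop writes this register type but the free list could not provide a valid physical destination.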
io.ren_stalls(w) := (ren2_uops(w).dst_rtype === rtype) && !can_allocate val bypassed_uop = Wire(new MicroOp) if (w > 0) bypassed_uop := BypassAllocations(ren2_uops(w), ren2_uops.slice(0,w), ren2_alloc_reqs.slice(0,w)) else bypassed_uop := ren2_uops(w) io.ren2_uops(w) := GetNewUopAndBrMask(bypassed_uop, io.brupdate) } //------------------------------------------------------------- // Debug signals io.debug.freelist := freelist.io.debug.freelist io.debug.isprlist := freelist.io.debug.isprlist io.debug.busytable := busytable.io.debug.busytable } class PredRenameStage( plWidth: Int, numPhysRegs: Int, numWbPorts: Int) (implicit p: Parameters) extends AbstractRenameStage(plWidth, numPhysRegs, numWbPorts)(p) { def BypassAllocations(uop: MicroOp, older_uops: Seq[MicroOp], alloc_reqs: Seq[Bool]): MicroOp = { uop } ren2_alloc_reqs := DontCare val busy_table = RegInit(VecInit(0.U(ftqSz.W).asBools)) val to_busy = WireInit(VecInit(0.U(ftqSz.W).asBools)) val unbusy = WireInit(VecInit(0.U(ftqSz.W).asBools)) val current_ftq_idx = Reg(UInt(log2Ceil(ftqSz).W)) var next_ftq_idx = current_ftq_idx for (w <- 0 until plWidth) { io.ren2_uops(w) := ren2_uops(w) val is_sfb_br = ren2_uops(w).is_sfb_br && ren2_fire(w) val is_sfb_shadow = ren2_uops(w).is_sfb_shadow && ren2_fire(w) val ftq_idx = ren2_uops(w).ftq_idx when (is_sfb_br) { io.ren2_uops(w).pdst := ftq_idx to_busy(ftq_idx) := true.B } next_ftq_idx = Mux(is_sfb_br, ftq_idx, next_ftq_idx) when (is_sfb_shadow) { io.ren2_uops(w).ppred := next_ftq_idx io.ren2_uops(w).ppred_busy := (busy_table(next_ftq_idx) || to_busy(next_ftq_idx)) && !unbusy(next_ftq_idx) } } for (w <- 0 until numWbPorts) { when (io.wakeups(w).valid) { unbusy(io.wakeups(w).bits.uop.pdst) := true.B } } current_ftq_idx := next_ftq_idx busy_table := ((busy_table.asUInt | to_busy.asUInt) & ~unbusy.asUInt).asBools }
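A quick plain-Scala check of the predicate busy-table update at the end of PredRenameStage above (busy_table := (busy_table | to_busy) & ~unbusy); the bit patterns are made up purely for illustration.

object PredBusyTableSketch {
  // Same update rule expressed on plain integers:
  // newly busied bits are OR-ed in, woken-up bits are cleared.
  def update(busy: Int, toBusy: Int, unbusy: Int): Int =
    (busy | toBusy) & ~unbusy

  def main(args: Array[String]): Unit = {
    val busy   = Integer.parseInt("0110", 2)
    val toBusy = Integer.parseInt("0001", 2)
    val unbusy = Integer.parseInt("0100", 2)
    println(update(busy, toBusy, unbusy).toBinaryString) // prints "11", i.e. 0b0011
  }
}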
module RenameStage_2( // @[rename-stage.scala:160:7] input clock, // @[rename-stage.scala:160:7] input reset, // @[rename-stage.scala:160:7] output io_ren_stalls_0, // @[rename-stage.scala:60:14] input io_kill, // @[rename-stage.scala:60:14] input io_dec_fire_0, // @[rename-stage.scala:60:14] input [6:0] io_dec_uops_0_uopc, // @[rename-stage.scala:60:14] input [31:0] io_dec_uops_0_inst, // @[rename-stage.scala:60:14] input [31:0] io_dec_uops_0_debug_inst, // @[rename-stage.scala:60:14] input io_dec_uops_0_is_rvc, // @[rename-stage.scala:60:14] input [39:0] io_dec_uops_0_debug_pc, // @[rename-stage.scala:60:14] input [2:0] io_dec_uops_0_iq_type, // @[rename-stage.scala:60:14] input [9:0] io_dec_uops_0_fu_code, // @[rename-stage.scala:60:14] input io_dec_uops_0_is_br, // @[rename-stage.scala:60:14] input io_dec_uops_0_is_jalr, // @[rename-stage.scala:60:14] input io_dec_uops_0_is_jal, // @[rename-stage.scala:60:14] input io_dec_uops_0_is_sfb, // @[rename-stage.scala:60:14] input [7:0] io_dec_uops_0_br_mask, // @[rename-stage.scala:60:14] input [2:0] io_dec_uops_0_br_tag, // @[rename-stage.scala:60:14] input [3:0] io_dec_uops_0_ftq_idx, // @[rename-stage.scala:60:14] input io_dec_uops_0_edge_inst, // @[rename-stage.scala:60:14] input [5:0] io_dec_uops_0_pc_lob, // @[rename-stage.scala:60:14] input io_dec_uops_0_taken, // @[rename-stage.scala:60:14] input [19:0] io_dec_uops_0_imm_packed, // @[rename-stage.scala:60:14] input io_dec_uops_0_exception, // @[rename-stage.scala:60:14] input [63:0] io_dec_uops_0_exc_cause, // @[rename-stage.scala:60:14] input io_dec_uops_0_bypassable, // @[rename-stage.scala:60:14] input [4:0] io_dec_uops_0_mem_cmd, // @[rename-stage.scala:60:14] input [1:0] io_dec_uops_0_mem_size, // @[rename-stage.scala:60:14] input io_dec_uops_0_mem_signed, // @[rename-stage.scala:60:14] input io_dec_uops_0_is_fence, // @[rename-stage.scala:60:14] input io_dec_uops_0_is_fencei, // @[rename-stage.scala:60:14] input io_dec_uops_0_is_amo, // @[rename-stage.scala:60:14] input io_dec_uops_0_uses_ldq, // @[rename-stage.scala:60:14] input io_dec_uops_0_uses_stq, // @[rename-stage.scala:60:14] input io_dec_uops_0_is_sys_pc2epc, // @[rename-stage.scala:60:14] input io_dec_uops_0_is_unique, // @[rename-stage.scala:60:14] input io_dec_uops_0_flush_on_commit, // @[rename-stage.scala:60:14] input [5:0] io_dec_uops_0_ldst, // @[rename-stage.scala:60:14] input [5:0] io_dec_uops_0_lrs1, // @[rename-stage.scala:60:14] input [5:0] io_dec_uops_0_lrs2, // @[rename-stage.scala:60:14] input [5:0] io_dec_uops_0_lrs3, // @[rename-stage.scala:60:14] input io_dec_uops_0_ldst_val, // @[rename-stage.scala:60:14] input [1:0] io_dec_uops_0_dst_rtype, // @[rename-stage.scala:60:14] input [1:0] io_dec_uops_0_lrs1_rtype, // @[rename-stage.scala:60:14] input [1:0] io_dec_uops_0_lrs2_rtype, // @[rename-stage.scala:60:14] input io_dec_uops_0_frs3_en, // @[rename-stage.scala:60:14] input io_dec_uops_0_fp_val, // @[rename-stage.scala:60:14] input io_dec_uops_0_fp_single, // @[rename-stage.scala:60:14] input io_dec_uops_0_xcpt_pf_if, // @[rename-stage.scala:60:14] input io_dec_uops_0_xcpt_ae_if, // @[rename-stage.scala:60:14] input io_dec_uops_0_bp_debug_if, // @[rename-stage.scala:60:14] input io_dec_uops_0_bp_xcpt_if, // @[rename-stage.scala:60:14] input [1:0] io_dec_uops_0_debug_fsrc, // @[rename-stage.scala:60:14] output io_ren2_mask_0, // @[rename-stage.scala:60:14] output [6:0] io_ren2_uops_0_uopc, // @[rename-stage.scala:60:14] output [31:0] io_ren2_uops_0_inst, // @[rename-stage.scala:60:14] output [31:0] 
io_ren2_uops_0_debug_inst, // @[rename-stage.scala:60:14] output io_ren2_uops_0_is_rvc, // @[rename-stage.scala:60:14] output [39:0] io_ren2_uops_0_debug_pc, // @[rename-stage.scala:60:14] output [2:0] io_ren2_uops_0_iq_type, // @[rename-stage.scala:60:14] output [9:0] io_ren2_uops_0_fu_code, // @[rename-stage.scala:60:14] output [3:0] io_ren2_uops_0_ctrl_br_type, // @[rename-stage.scala:60:14] output [1:0] io_ren2_uops_0_ctrl_op1_sel, // @[rename-stage.scala:60:14] output [2:0] io_ren2_uops_0_ctrl_op2_sel, // @[rename-stage.scala:60:14] output [2:0] io_ren2_uops_0_ctrl_imm_sel, // @[rename-stage.scala:60:14] output [4:0] io_ren2_uops_0_ctrl_op_fcn, // @[rename-stage.scala:60:14] output io_ren2_uops_0_ctrl_fcn_dw, // @[rename-stage.scala:60:14] output [2:0] io_ren2_uops_0_ctrl_csr_cmd, // @[rename-stage.scala:60:14] output io_ren2_uops_0_ctrl_is_load, // @[rename-stage.scala:60:14] output io_ren2_uops_0_ctrl_is_sta, // @[rename-stage.scala:60:14] output io_ren2_uops_0_ctrl_is_std, // @[rename-stage.scala:60:14] output [1:0] io_ren2_uops_0_iw_state, // @[rename-stage.scala:60:14] output io_ren2_uops_0_iw_p1_poisoned, // @[rename-stage.scala:60:14] output io_ren2_uops_0_iw_p2_poisoned, // @[rename-stage.scala:60:14] output io_ren2_uops_0_is_br, // @[rename-stage.scala:60:14] output io_ren2_uops_0_is_jalr, // @[rename-stage.scala:60:14] output io_ren2_uops_0_is_jal, // @[rename-stage.scala:60:14] output io_ren2_uops_0_is_sfb, // @[rename-stage.scala:60:14] output [7:0] io_ren2_uops_0_br_mask, // @[rename-stage.scala:60:14] output [2:0] io_ren2_uops_0_br_tag, // @[rename-stage.scala:60:14] output [3:0] io_ren2_uops_0_ftq_idx, // @[rename-stage.scala:60:14] output io_ren2_uops_0_edge_inst, // @[rename-stage.scala:60:14] output [5:0] io_ren2_uops_0_pc_lob, // @[rename-stage.scala:60:14] output io_ren2_uops_0_taken, // @[rename-stage.scala:60:14] output [19:0] io_ren2_uops_0_imm_packed, // @[rename-stage.scala:60:14] output [11:0] io_ren2_uops_0_csr_addr, // @[rename-stage.scala:60:14] output [1:0] io_ren2_uops_0_rxq_idx, // @[rename-stage.scala:60:14] output [5:0] io_ren2_uops_0_pdst, // @[rename-stage.scala:60:14] output [5:0] io_ren2_uops_0_prs1, // @[rename-stage.scala:60:14] output [5:0] io_ren2_uops_0_prs2, // @[rename-stage.scala:60:14] output io_ren2_uops_0_prs1_busy, // @[rename-stage.scala:60:14] output io_ren2_uops_0_prs2_busy, // @[rename-stage.scala:60:14] output [5:0] io_ren2_uops_0_stale_pdst, // @[rename-stage.scala:60:14] output io_ren2_uops_0_exception, // @[rename-stage.scala:60:14] output [63:0] io_ren2_uops_0_exc_cause, // @[rename-stage.scala:60:14] output io_ren2_uops_0_bypassable, // @[rename-stage.scala:60:14] output [4:0] io_ren2_uops_0_mem_cmd, // @[rename-stage.scala:60:14] output [1:0] io_ren2_uops_0_mem_size, // @[rename-stage.scala:60:14] output io_ren2_uops_0_mem_signed, // @[rename-stage.scala:60:14] output io_ren2_uops_0_is_fence, // @[rename-stage.scala:60:14] output io_ren2_uops_0_is_fencei, // @[rename-stage.scala:60:14] output io_ren2_uops_0_is_amo, // @[rename-stage.scala:60:14] output io_ren2_uops_0_uses_ldq, // @[rename-stage.scala:60:14] output io_ren2_uops_0_uses_stq, // @[rename-stage.scala:60:14] output io_ren2_uops_0_is_sys_pc2epc, // @[rename-stage.scala:60:14] output io_ren2_uops_0_is_unique, // @[rename-stage.scala:60:14] output io_ren2_uops_0_flush_on_commit, // @[rename-stage.scala:60:14] output io_ren2_uops_0_ldst_is_rs1, // @[rename-stage.scala:60:14] output [5:0] io_ren2_uops_0_ldst, // @[rename-stage.scala:60:14] output [5:0] 
io_ren2_uops_0_lrs1, // @[rename-stage.scala:60:14] output [5:0] io_ren2_uops_0_lrs2, // @[rename-stage.scala:60:14] output [5:0] io_ren2_uops_0_lrs3, // @[rename-stage.scala:60:14] output io_ren2_uops_0_ldst_val, // @[rename-stage.scala:60:14] output [1:0] io_ren2_uops_0_dst_rtype, // @[rename-stage.scala:60:14] output [1:0] io_ren2_uops_0_lrs1_rtype, // @[rename-stage.scala:60:14] output [1:0] io_ren2_uops_0_lrs2_rtype, // @[rename-stage.scala:60:14] output io_ren2_uops_0_frs3_en, // @[rename-stage.scala:60:14] output io_ren2_uops_0_fp_val, // @[rename-stage.scala:60:14] output io_ren2_uops_0_fp_single, // @[rename-stage.scala:60:14] output io_ren2_uops_0_xcpt_pf_if, // @[rename-stage.scala:60:14] output io_ren2_uops_0_xcpt_ae_if, // @[rename-stage.scala:60:14] output io_ren2_uops_0_xcpt_ma_if, // @[rename-stage.scala:60:14] output io_ren2_uops_0_bp_debug_if, // @[rename-stage.scala:60:14] output io_ren2_uops_0_bp_xcpt_if, // @[rename-stage.scala:60:14] output [1:0] io_ren2_uops_0_debug_fsrc, // @[rename-stage.scala:60:14] output [1:0] io_ren2_uops_0_debug_tsrc, // @[rename-stage.scala:60:14] input [7:0] io_brupdate_b1_resolve_mask, // @[rename-stage.scala:60:14] input [7:0] io_brupdate_b1_mispredict_mask, // @[rename-stage.scala:60:14] input [6:0] io_brupdate_b2_uop_uopc, // @[rename-stage.scala:60:14] input [31:0] io_brupdate_b2_uop_inst, // @[rename-stage.scala:60:14] input [31:0] io_brupdate_b2_uop_debug_inst, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_is_rvc, // @[rename-stage.scala:60:14] input [39:0] io_brupdate_b2_uop_debug_pc, // @[rename-stage.scala:60:14] input [2:0] io_brupdate_b2_uop_iq_type, // @[rename-stage.scala:60:14] input [9:0] io_brupdate_b2_uop_fu_code, // @[rename-stage.scala:60:14] input [3:0] io_brupdate_b2_uop_ctrl_br_type, // @[rename-stage.scala:60:14] input [1:0] io_brupdate_b2_uop_ctrl_op1_sel, // @[rename-stage.scala:60:14] input [2:0] io_brupdate_b2_uop_ctrl_op2_sel, // @[rename-stage.scala:60:14] input [2:0] io_brupdate_b2_uop_ctrl_imm_sel, // @[rename-stage.scala:60:14] input [4:0] io_brupdate_b2_uop_ctrl_op_fcn, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_ctrl_fcn_dw, // @[rename-stage.scala:60:14] input [2:0] io_brupdate_b2_uop_ctrl_csr_cmd, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_ctrl_is_load, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_ctrl_is_sta, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_ctrl_is_std, // @[rename-stage.scala:60:14] input [1:0] io_brupdate_b2_uop_iw_state, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_iw_p1_poisoned, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_iw_p2_poisoned, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_is_br, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_is_jalr, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_is_jal, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_is_sfb, // @[rename-stage.scala:60:14] input [7:0] io_brupdate_b2_uop_br_mask, // @[rename-stage.scala:60:14] input [2:0] io_brupdate_b2_uop_br_tag, // @[rename-stage.scala:60:14] input [3:0] io_brupdate_b2_uop_ftq_idx, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_edge_inst, // @[rename-stage.scala:60:14] input [5:0] io_brupdate_b2_uop_pc_lob, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_taken, // @[rename-stage.scala:60:14] input [19:0] io_brupdate_b2_uop_imm_packed, // @[rename-stage.scala:60:14] input [11:0] io_brupdate_b2_uop_csr_addr, // @[rename-stage.scala:60:14] input [4:0] io_brupdate_b2_uop_rob_idx, 
// @[rename-stage.scala:60:14] input [2:0] io_brupdate_b2_uop_ldq_idx, // @[rename-stage.scala:60:14] input [2:0] io_brupdate_b2_uop_stq_idx, // @[rename-stage.scala:60:14] input [1:0] io_brupdate_b2_uop_rxq_idx, // @[rename-stage.scala:60:14] input [5:0] io_brupdate_b2_uop_pdst, // @[rename-stage.scala:60:14] input [5:0] io_brupdate_b2_uop_prs1, // @[rename-stage.scala:60:14] input [5:0] io_brupdate_b2_uop_prs2, // @[rename-stage.scala:60:14] input [5:0] io_brupdate_b2_uop_prs3, // @[rename-stage.scala:60:14] input [3:0] io_brupdate_b2_uop_ppred, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_prs1_busy, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_prs2_busy, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_prs3_busy, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_ppred_busy, // @[rename-stage.scala:60:14] input [5:0] io_brupdate_b2_uop_stale_pdst, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_exception, // @[rename-stage.scala:60:14] input [63:0] io_brupdate_b2_uop_exc_cause, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_bypassable, // @[rename-stage.scala:60:14] input [4:0] io_brupdate_b2_uop_mem_cmd, // @[rename-stage.scala:60:14] input [1:0] io_brupdate_b2_uop_mem_size, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_mem_signed, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_is_fence, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_is_fencei, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_is_amo, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_uses_ldq, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_uses_stq, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_is_sys_pc2epc, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_is_unique, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_flush_on_commit, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_ldst_is_rs1, // @[rename-stage.scala:60:14] input [5:0] io_brupdate_b2_uop_ldst, // @[rename-stage.scala:60:14] input [5:0] io_brupdate_b2_uop_lrs1, // @[rename-stage.scala:60:14] input [5:0] io_brupdate_b2_uop_lrs2, // @[rename-stage.scala:60:14] input [5:0] io_brupdate_b2_uop_lrs3, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_ldst_val, // @[rename-stage.scala:60:14] input [1:0] io_brupdate_b2_uop_dst_rtype, // @[rename-stage.scala:60:14] input [1:0] io_brupdate_b2_uop_lrs1_rtype, // @[rename-stage.scala:60:14] input [1:0] io_brupdate_b2_uop_lrs2_rtype, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_frs3_en, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_fp_val, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_fp_single, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_xcpt_pf_if, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_xcpt_ae_if, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_xcpt_ma_if, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_bp_debug_if, // @[rename-stage.scala:60:14] input io_brupdate_b2_uop_bp_xcpt_if, // @[rename-stage.scala:60:14] input [1:0] io_brupdate_b2_uop_debug_fsrc, // @[rename-stage.scala:60:14] input [1:0] io_brupdate_b2_uop_debug_tsrc, // @[rename-stage.scala:60:14] input io_brupdate_b2_valid, // @[rename-stage.scala:60:14] input io_brupdate_b2_mispredict, // @[rename-stage.scala:60:14] input io_brupdate_b2_taken, // @[rename-stage.scala:60:14] input [2:0] io_brupdate_b2_cfi_type, // @[rename-stage.scala:60:14] input [1:0] io_brupdate_b2_pc_sel, // @[rename-stage.scala:60:14] input [39:0] 
io_brupdate_b2_jalr_target, // @[rename-stage.scala:60:14] input [20:0] io_brupdate_b2_target_offset, // @[rename-stage.scala:60:14] input io_dis_fire_0, // @[rename-stage.scala:60:14] input io_dis_ready, // @[rename-stage.scala:60:14] input io_wakeups_0_valid, // @[rename-stage.scala:60:14] input [6:0] io_wakeups_0_bits_uop_uopc, // @[rename-stage.scala:60:14] input [31:0] io_wakeups_0_bits_uop_inst, // @[rename-stage.scala:60:14] input [31:0] io_wakeups_0_bits_uop_debug_inst, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_is_rvc, // @[rename-stage.scala:60:14] input [39:0] io_wakeups_0_bits_uop_debug_pc, // @[rename-stage.scala:60:14] input [2:0] io_wakeups_0_bits_uop_iq_type, // @[rename-stage.scala:60:14] input [9:0] io_wakeups_0_bits_uop_fu_code, // @[rename-stage.scala:60:14] input [3:0] io_wakeups_0_bits_uop_ctrl_br_type, // @[rename-stage.scala:60:14] input [1:0] io_wakeups_0_bits_uop_ctrl_op1_sel, // @[rename-stage.scala:60:14] input [2:0] io_wakeups_0_bits_uop_ctrl_op2_sel, // @[rename-stage.scala:60:14] input [2:0] io_wakeups_0_bits_uop_ctrl_imm_sel, // @[rename-stage.scala:60:14] input [4:0] io_wakeups_0_bits_uop_ctrl_op_fcn, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_ctrl_fcn_dw, // @[rename-stage.scala:60:14] input [2:0] io_wakeups_0_bits_uop_ctrl_csr_cmd, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_ctrl_is_load, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_ctrl_is_sta, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_ctrl_is_std, // @[rename-stage.scala:60:14] input [1:0] io_wakeups_0_bits_uop_iw_state, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_iw_p1_poisoned, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_iw_p2_poisoned, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_is_br, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_is_jalr, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_is_jal, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_is_sfb, // @[rename-stage.scala:60:14] input [7:0] io_wakeups_0_bits_uop_br_mask, // @[rename-stage.scala:60:14] input [2:0] io_wakeups_0_bits_uop_br_tag, // @[rename-stage.scala:60:14] input [3:0] io_wakeups_0_bits_uop_ftq_idx, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_edge_inst, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_0_bits_uop_pc_lob, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_taken, // @[rename-stage.scala:60:14] input [19:0] io_wakeups_0_bits_uop_imm_packed, // @[rename-stage.scala:60:14] input [11:0] io_wakeups_0_bits_uop_csr_addr, // @[rename-stage.scala:60:14] input [4:0] io_wakeups_0_bits_uop_rob_idx, // @[rename-stage.scala:60:14] input [2:0] io_wakeups_0_bits_uop_ldq_idx, // @[rename-stage.scala:60:14] input [2:0] io_wakeups_0_bits_uop_stq_idx, // @[rename-stage.scala:60:14] input [1:0] io_wakeups_0_bits_uop_rxq_idx, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_0_bits_uop_pdst, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_0_bits_uop_prs1, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_0_bits_uop_prs2, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_0_bits_uop_prs3, // @[rename-stage.scala:60:14] input [3:0] io_wakeups_0_bits_uop_ppred, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_prs1_busy, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_prs2_busy, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_prs3_busy, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_ppred_busy, // 
@[rename-stage.scala:60:14] input [5:0] io_wakeups_0_bits_uop_stale_pdst, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_exception, // @[rename-stage.scala:60:14] input [63:0] io_wakeups_0_bits_uop_exc_cause, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_bypassable, // @[rename-stage.scala:60:14] input [4:0] io_wakeups_0_bits_uop_mem_cmd, // @[rename-stage.scala:60:14] input [1:0] io_wakeups_0_bits_uop_mem_size, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_mem_signed, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_is_fence, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_is_fencei, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_is_amo, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_uses_ldq, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_uses_stq, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_is_sys_pc2epc, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_is_unique, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_flush_on_commit, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_ldst_is_rs1, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_0_bits_uop_ldst, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_0_bits_uop_lrs1, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_0_bits_uop_lrs2, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_0_bits_uop_lrs3, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_ldst_val, // @[rename-stage.scala:60:14] input [1:0] io_wakeups_0_bits_uop_dst_rtype, // @[rename-stage.scala:60:14] input [1:0] io_wakeups_0_bits_uop_lrs1_rtype, // @[rename-stage.scala:60:14] input [1:0] io_wakeups_0_bits_uop_lrs2_rtype, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_frs3_en, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_fp_val, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_fp_single, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_xcpt_pf_if, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_xcpt_ae_if, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_xcpt_ma_if, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_bp_debug_if, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_uop_bp_xcpt_if, // @[rename-stage.scala:60:14] input [1:0] io_wakeups_0_bits_uop_debug_fsrc, // @[rename-stage.scala:60:14] input [1:0] io_wakeups_0_bits_uop_debug_tsrc, // @[rename-stage.scala:60:14] input [63:0] io_wakeups_0_bits_data, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_predicated, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_valid, // @[rename-stage.scala:60:14] input [6:0] io_wakeups_0_bits_fflags_bits_uop_uopc, // @[rename-stage.scala:60:14] input [31:0] io_wakeups_0_bits_fflags_bits_uop_inst, // @[rename-stage.scala:60:14] input [31:0] io_wakeups_0_bits_fflags_bits_uop_debug_inst, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_is_rvc, // @[rename-stage.scala:60:14] input [39:0] io_wakeups_0_bits_fflags_bits_uop_debug_pc, // @[rename-stage.scala:60:14] input [2:0] io_wakeups_0_bits_fflags_bits_uop_iq_type, // @[rename-stage.scala:60:14] input [9:0] io_wakeups_0_bits_fflags_bits_uop_fu_code, // @[rename-stage.scala:60:14] input [3:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_br_type, // @[rename-stage.scala:60:14] input [1:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_op1_sel, // @[rename-stage.scala:60:14] input [2:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_op2_sel, // @[rename-stage.scala:60:14] 
input [2:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_imm_sel, // @[rename-stage.scala:60:14] input [4:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_op_fcn, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_ctrl_fcn_dw, // @[rename-stage.scala:60:14] input [2:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_csr_cmd, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_ctrl_is_load, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_ctrl_is_sta, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_ctrl_is_std, // @[rename-stage.scala:60:14] input [1:0] io_wakeups_0_bits_fflags_bits_uop_iw_state, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_iw_p1_poisoned, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_iw_p2_poisoned, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_is_br, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_is_jalr, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_is_jal, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_is_sfb, // @[rename-stage.scala:60:14] input [7:0] io_wakeups_0_bits_fflags_bits_uop_br_mask, // @[rename-stage.scala:60:14] input [2:0] io_wakeups_0_bits_fflags_bits_uop_br_tag, // @[rename-stage.scala:60:14] input [3:0] io_wakeups_0_bits_fflags_bits_uop_ftq_idx, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_edge_inst, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_0_bits_fflags_bits_uop_pc_lob, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_taken, // @[rename-stage.scala:60:14] input [19:0] io_wakeups_0_bits_fflags_bits_uop_imm_packed, // @[rename-stage.scala:60:14] input [11:0] io_wakeups_0_bits_fflags_bits_uop_csr_addr, // @[rename-stage.scala:60:14] input [4:0] io_wakeups_0_bits_fflags_bits_uop_rob_idx, // @[rename-stage.scala:60:14] input [2:0] io_wakeups_0_bits_fflags_bits_uop_ldq_idx, // @[rename-stage.scala:60:14] input [2:0] io_wakeups_0_bits_fflags_bits_uop_stq_idx, // @[rename-stage.scala:60:14] input [1:0] io_wakeups_0_bits_fflags_bits_uop_rxq_idx, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_0_bits_fflags_bits_uop_pdst, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_0_bits_fflags_bits_uop_prs1, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_0_bits_fflags_bits_uop_prs2, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_0_bits_fflags_bits_uop_prs3, // @[rename-stage.scala:60:14] input [3:0] io_wakeups_0_bits_fflags_bits_uop_ppred, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_prs1_busy, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_prs2_busy, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_prs3_busy, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_ppred_busy, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_0_bits_fflags_bits_uop_stale_pdst, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_exception, // @[rename-stage.scala:60:14] input [63:0] io_wakeups_0_bits_fflags_bits_uop_exc_cause, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_bypassable, // @[rename-stage.scala:60:14] input [4:0] io_wakeups_0_bits_fflags_bits_uop_mem_cmd, // @[rename-stage.scala:60:14] input [1:0] io_wakeups_0_bits_fflags_bits_uop_mem_size, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_mem_signed, // @[rename-stage.scala:60:14] 
input io_wakeups_0_bits_fflags_bits_uop_is_fence, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_is_fencei, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_is_amo, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_uses_ldq, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_uses_stq, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_is_sys_pc2epc, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_is_unique, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_flush_on_commit, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_ldst_is_rs1, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_0_bits_fflags_bits_uop_ldst, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_0_bits_fflags_bits_uop_lrs1, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_0_bits_fflags_bits_uop_lrs2, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_0_bits_fflags_bits_uop_lrs3, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_ldst_val, // @[rename-stage.scala:60:14] input [1:0] io_wakeups_0_bits_fflags_bits_uop_dst_rtype, // @[rename-stage.scala:60:14] input [1:0] io_wakeups_0_bits_fflags_bits_uop_lrs1_rtype, // @[rename-stage.scala:60:14] input [1:0] io_wakeups_0_bits_fflags_bits_uop_lrs2_rtype, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_frs3_en, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_fp_val, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_fp_single, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_xcpt_pf_if, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_xcpt_ae_if, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_xcpt_ma_if, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_bp_debug_if, // @[rename-stage.scala:60:14] input io_wakeups_0_bits_fflags_bits_uop_bp_xcpt_if, // @[rename-stage.scala:60:14] input [1:0] io_wakeups_0_bits_fflags_bits_uop_debug_fsrc, // @[rename-stage.scala:60:14] input [1:0] io_wakeups_0_bits_fflags_bits_uop_debug_tsrc, // @[rename-stage.scala:60:14] input [4:0] io_wakeups_0_bits_fflags_bits_flags, // @[rename-stage.scala:60:14] input io_wakeups_1_valid, // @[rename-stage.scala:60:14] input [6:0] io_wakeups_1_bits_uop_uopc, // @[rename-stage.scala:60:14] input [31:0] io_wakeups_1_bits_uop_inst, // @[rename-stage.scala:60:14] input [31:0] io_wakeups_1_bits_uop_debug_inst, // @[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_is_rvc, // @[rename-stage.scala:60:14] input [39:0] io_wakeups_1_bits_uop_debug_pc, // @[rename-stage.scala:60:14] input [2:0] io_wakeups_1_bits_uop_iq_type, // @[rename-stage.scala:60:14] input [9:0] io_wakeups_1_bits_uop_fu_code, // @[rename-stage.scala:60:14] input [3:0] io_wakeups_1_bits_uop_ctrl_br_type, // @[rename-stage.scala:60:14] input [1:0] io_wakeups_1_bits_uop_ctrl_op1_sel, // @[rename-stage.scala:60:14] input [2:0] io_wakeups_1_bits_uop_ctrl_op2_sel, // @[rename-stage.scala:60:14] input [2:0] io_wakeups_1_bits_uop_ctrl_imm_sel, // @[rename-stage.scala:60:14] input [4:0] io_wakeups_1_bits_uop_ctrl_op_fcn, // @[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_ctrl_fcn_dw, // @[rename-stage.scala:60:14] input [2:0] io_wakeups_1_bits_uop_ctrl_csr_cmd, // @[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_ctrl_is_load, // @[rename-stage.scala:60:14] input 
io_wakeups_1_bits_uop_ctrl_is_sta, // @[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_ctrl_is_std, // @[rename-stage.scala:60:14] input [1:0] io_wakeups_1_bits_uop_iw_state, // @[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_iw_p1_poisoned, // @[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_iw_p2_poisoned, // @[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_is_br, // @[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_is_jalr, // @[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_is_jal, // @[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_is_sfb, // @[rename-stage.scala:60:14] input [7:0] io_wakeups_1_bits_uop_br_mask, // @[rename-stage.scala:60:14] input [2:0] io_wakeups_1_bits_uop_br_tag, // @[rename-stage.scala:60:14] input [3:0] io_wakeups_1_bits_uop_ftq_idx, // @[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_edge_inst, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_1_bits_uop_pc_lob, // @[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_taken, // @[rename-stage.scala:60:14] input [19:0] io_wakeups_1_bits_uop_imm_packed, // @[rename-stage.scala:60:14] input [11:0] io_wakeups_1_bits_uop_csr_addr, // @[rename-stage.scala:60:14] input [4:0] io_wakeups_1_bits_uop_rob_idx, // @[rename-stage.scala:60:14] input [2:0] io_wakeups_1_bits_uop_ldq_idx, // @[rename-stage.scala:60:14] input [2:0] io_wakeups_1_bits_uop_stq_idx, // @[rename-stage.scala:60:14] input [1:0] io_wakeups_1_bits_uop_rxq_idx, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_1_bits_uop_pdst, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_1_bits_uop_prs1, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_1_bits_uop_prs2, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_1_bits_uop_prs3, // @[rename-stage.scala:60:14] input [3:0] io_wakeups_1_bits_uop_ppred, // @[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_prs1_busy, // @[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_prs2_busy, // @[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_prs3_busy, // @[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_ppred_busy, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_1_bits_uop_stale_pdst, // @[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_exception, // @[rename-stage.scala:60:14] input [63:0] io_wakeups_1_bits_uop_exc_cause, // @[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_bypassable, // @[rename-stage.scala:60:14] input [4:0] io_wakeups_1_bits_uop_mem_cmd, // @[rename-stage.scala:60:14] input [1:0] io_wakeups_1_bits_uop_mem_size, // @[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_mem_signed, // @[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_is_fence, // @[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_is_fencei, // @[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_is_amo, // @[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_uses_ldq, // @[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_uses_stq, // @[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_is_sys_pc2epc, // @[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_is_unique, // @[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_flush_on_commit, // @[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_ldst_is_rs1, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_1_bits_uop_ldst, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_1_bits_uop_lrs1, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_1_bits_uop_lrs2, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_1_bits_uop_lrs3, // 
@[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_ldst_val, // @[rename-stage.scala:60:14] input [1:0] io_wakeups_1_bits_uop_dst_rtype, // @[rename-stage.scala:60:14] input [1:0] io_wakeups_1_bits_uop_lrs1_rtype, // @[rename-stage.scala:60:14] input [1:0] io_wakeups_1_bits_uop_lrs2_rtype, // @[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_frs3_en, // @[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_fp_val, // @[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_fp_single, // @[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_xcpt_pf_if, // @[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_xcpt_ae_if, // @[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_xcpt_ma_if, // @[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_bp_debug_if, // @[rename-stage.scala:60:14] input io_wakeups_1_bits_uop_bp_xcpt_if, // @[rename-stage.scala:60:14] input [1:0] io_wakeups_1_bits_uop_debug_fsrc, // @[rename-stage.scala:60:14] input [1:0] io_wakeups_1_bits_uop_debug_tsrc, // @[rename-stage.scala:60:14] input io_wakeups_2_valid, // @[rename-stage.scala:60:14] input [6:0] io_wakeups_2_bits_uop_uopc, // @[rename-stage.scala:60:14] input [31:0] io_wakeups_2_bits_uop_inst, // @[rename-stage.scala:60:14] input [31:0] io_wakeups_2_bits_uop_debug_inst, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_is_rvc, // @[rename-stage.scala:60:14] input [39:0] io_wakeups_2_bits_uop_debug_pc, // @[rename-stage.scala:60:14] input [2:0] io_wakeups_2_bits_uop_iq_type, // @[rename-stage.scala:60:14] input [9:0] io_wakeups_2_bits_uop_fu_code, // @[rename-stage.scala:60:14] input [3:0] io_wakeups_2_bits_uop_ctrl_br_type, // @[rename-stage.scala:60:14] input [1:0] io_wakeups_2_bits_uop_ctrl_op1_sel, // @[rename-stage.scala:60:14] input [2:0] io_wakeups_2_bits_uop_ctrl_op2_sel, // @[rename-stage.scala:60:14] input [2:0] io_wakeups_2_bits_uop_ctrl_imm_sel, // @[rename-stage.scala:60:14] input [4:0] io_wakeups_2_bits_uop_ctrl_op_fcn, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_ctrl_fcn_dw, // @[rename-stage.scala:60:14] input [2:0] io_wakeups_2_bits_uop_ctrl_csr_cmd, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_ctrl_is_load, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_ctrl_is_sta, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_ctrl_is_std, // @[rename-stage.scala:60:14] input [1:0] io_wakeups_2_bits_uop_iw_state, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_iw_p1_poisoned, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_iw_p2_poisoned, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_is_br, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_is_jalr, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_is_jal, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_is_sfb, // @[rename-stage.scala:60:14] input [7:0] io_wakeups_2_bits_uop_br_mask, // @[rename-stage.scala:60:14] input [2:0] io_wakeups_2_bits_uop_br_tag, // @[rename-stage.scala:60:14] input [3:0] io_wakeups_2_bits_uop_ftq_idx, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_edge_inst, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_2_bits_uop_pc_lob, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_taken, // @[rename-stage.scala:60:14] input [19:0] io_wakeups_2_bits_uop_imm_packed, // @[rename-stage.scala:60:14] input [11:0] io_wakeups_2_bits_uop_csr_addr, // @[rename-stage.scala:60:14] input [4:0] io_wakeups_2_bits_uop_rob_idx, // @[rename-stage.scala:60:14] input [2:0] 
io_wakeups_2_bits_uop_ldq_idx, // @[rename-stage.scala:60:14] input [2:0] io_wakeups_2_bits_uop_stq_idx, // @[rename-stage.scala:60:14] input [1:0] io_wakeups_2_bits_uop_rxq_idx, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_2_bits_uop_pdst, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_2_bits_uop_prs1, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_2_bits_uop_prs2, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_2_bits_uop_prs3, // @[rename-stage.scala:60:14] input [3:0] io_wakeups_2_bits_uop_ppred, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_prs1_busy, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_prs2_busy, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_prs3_busy, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_ppred_busy, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_2_bits_uop_stale_pdst, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_exception, // @[rename-stage.scala:60:14] input [63:0] io_wakeups_2_bits_uop_exc_cause, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_bypassable, // @[rename-stage.scala:60:14] input [4:0] io_wakeups_2_bits_uop_mem_cmd, // @[rename-stage.scala:60:14] input [1:0] io_wakeups_2_bits_uop_mem_size, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_mem_signed, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_is_fence, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_is_fencei, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_is_amo, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_uses_ldq, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_uses_stq, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_is_sys_pc2epc, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_is_unique, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_flush_on_commit, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_ldst_is_rs1, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_2_bits_uop_ldst, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_2_bits_uop_lrs1, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_2_bits_uop_lrs2, // @[rename-stage.scala:60:14] input [5:0] io_wakeups_2_bits_uop_lrs3, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_ldst_val, // @[rename-stage.scala:60:14] input [1:0] io_wakeups_2_bits_uop_dst_rtype, // @[rename-stage.scala:60:14] input [1:0] io_wakeups_2_bits_uop_lrs1_rtype, // @[rename-stage.scala:60:14] input [1:0] io_wakeups_2_bits_uop_lrs2_rtype, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_frs3_en, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_fp_val, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_fp_single, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_xcpt_pf_if, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_xcpt_ae_if, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_xcpt_ma_if, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_bp_debug_if, // @[rename-stage.scala:60:14] input io_wakeups_2_bits_uop_bp_xcpt_if, // @[rename-stage.scala:60:14] input [1:0] io_wakeups_2_bits_uop_debug_fsrc, // @[rename-stage.scala:60:14] input [1:0] io_wakeups_2_bits_uop_debug_tsrc, // @[rename-stage.scala:60:14] input io_com_valids_0, // @[rename-stage.scala:60:14] input [6:0] io_com_uops_0_uopc, // @[rename-stage.scala:60:14] input [31:0] io_com_uops_0_inst, // @[rename-stage.scala:60:14] input [31:0] io_com_uops_0_debug_inst, // @[rename-stage.scala:60:14] input 
io_com_uops_0_is_rvc, // @[rename-stage.scala:60:14] input [39:0] io_com_uops_0_debug_pc, // @[rename-stage.scala:60:14] input [2:0] io_com_uops_0_iq_type, // @[rename-stage.scala:60:14] input [9:0] io_com_uops_0_fu_code, // @[rename-stage.scala:60:14] input [3:0] io_com_uops_0_ctrl_br_type, // @[rename-stage.scala:60:14] input [1:0] io_com_uops_0_ctrl_op1_sel, // @[rename-stage.scala:60:14] input [2:0] io_com_uops_0_ctrl_op2_sel, // @[rename-stage.scala:60:14] input [2:0] io_com_uops_0_ctrl_imm_sel, // @[rename-stage.scala:60:14] input [4:0] io_com_uops_0_ctrl_op_fcn, // @[rename-stage.scala:60:14] input io_com_uops_0_ctrl_fcn_dw, // @[rename-stage.scala:60:14] input [2:0] io_com_uops_0_ctrl_csr_cmd, // @[rename-stage.scala:60:14] input io_com_uops_0_ctrl_is_load, // @[rename-stage.scala:60:14] input io_com_uops_0_ctrl_is_sta, // @[rename-stage.scala:60:14] input io_com_uops_0_ctrl_is_std, // @[rename-stage.scala:60:14] input [1:0] io_com_uops_0_iw_state, // @[rename-stage.scala:60:14] input io_com_uops_0_iw_p1_poisoned, // @[rename-stage.scala:60:14] input io_com_uops_0_iw_p2_poisoned, // @[rename-stage.scala:60:14] input io_com_uops_0_is_br, // @[rename-stage.scala:60:14] input io_com_uops_0_is_jalr, // @[rename-stage.scala:60:14] input io_com_uops_0_is_jal, // @[rename-stage.scala:60:14] input io_com_uops_0_is_sfb, // @[rename-stage.scala:60:14] input [7:0] io_com_uops_0_br_mask, // @[rename-stage.scala:60:14] input [2:0] io_com_uops_0_br_tag, // @[rename-stage.scala:60:14] input [3:0] io_com_uops_0_ftq_idx, // @[rename-stage.scala:60:14] input io_com_uops_0_edge_inst, // @[rename-stage.scala:60:14] input [5:0] io_com_uops_0_pc_lob, // @[rename-stage.scala:60:14] input io_com_uops_0_taken, // @[rename-stage.scala:60:14] input [19:0] io_com_uops_0_imm_packed, // @[rename-stage.scala:60:14] input [11:0] io_com_uops_0_csr_addr, // @[rename-stage.scala:60:14] input [4:0] io_com_uops_0_rob_idx, // @[rename-stage.scala:60:14] input [2:0] io_com_uops_0_ldq_idx, // @[rename-stage.scala:60:14] input [2:0] io_com_uops_0_stq_idx, // @[rename-stage.scala:60:14] input [1:0] io_com_uops_0_rxq_idx, // @[rename-stage.scala:60:14] input [5:0] io_com_uops_0_pdst, // @[rename-stage.scala:60:14] input [5:0] io_com_uops_0_prs1, // @[rename-stage.scala:60:14] input [5:0] io_com_uops_0_prs2, // @[rename-stage.scala:60:14] input [5:0] io_com_uops_0_prs3, // @[rename-stage.scala:60:14] input [3:0] io_com_uops_0_ppred, // @[rename-stage.scala:60:14] input io_com_uops_0_prs1_busy, // @[rename-stage.scala:60:14] input io_com_uops_0_prs2_busy, // @[rename-stage.scala:60:14] input io_com_uops_0_prs3_busy, // @[rename-stage.scala:60:14] input io_com_uops_0_ppred_busy, // @[rename-stage.scala:60:14] input [5:0] io_com_uops_0_stale_pdst, // @[rename-stage.scala:60:14] input io_com_uops_0_exception, // @[rename-stage.scala:60:14] input [63:0] io_com_uops_0_exc_cause, // @[rename-stage.scala:60:14] input io_com_uops_0_bypassable, // @[rename-stage.scala:60:14] input [4:0] io_com_uops_0_mem_cmd, // @[rename-stage.scala:60:14] input [1:0] io_com_uops_0_mem_size, // @[rename-stage.scala:60:14] input io_com_uops_0_mem_signed, // @[rename-stage.scala:60:14] input io_com_uops_0_is_fence, // @[rename-stage.scala:60:14] input io_com_uops_0_is_fencei, // @[rename-stage.scala:60:14] input io_com_uops_0_is_amo, // @[rename-stage.scala:60:14] input io_com_uops_0_uses_ldq, // @[rename-stage.scala:60:14] input io_com_uops_0_uses_stq, // @[rename-stage.scala:60:14] input io_com_uops_0_is_sys_pc2epc, // @[rename-stage.scala:60:14] 
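// io_com_uops_0_*: micro-op fields accompanying io_com_valids_0 on the commit/rollback interface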
input io_com_uops_0_is_unique, // @[rename-stage.scala:60:14] input io_com_uops_0_flush_on_commit, // @[rename-stage.scala:60:14] input io_com_uops_0_ldst_is_rs1, // @[rename-stage.scala:60:14] input [5:0] io_com_uops_0_ldst, // @[rename-stage.scala:60:14] input [5:0] io_com_uops_0_lrs1, // @[rename-stage.scala:60:14] input [5:0] io_com_uops_0_lrs2, // @[rename-stage.scala:60:14] input [5:0] io_com_uops_0_lrs3, // @[rename-stage.scala:60:14] input io_com_uops_0_ldst_val, // @[rename-stage.scala:60:14] input [1:0] io_com_uops_0_dst_rtype, // @[rename-stage.scala:60:14] input [1:0] io_com_uops_0_lrs1_rtype, // @[rename-stage.scala:60:14] input [1:0] io_com_uops_0_lrs2_rtype, // @[rename-stage.scala:60:14] input io_com_uops_0_frs3_en, // @[rename-stage.scala:60:14] input io_com_uops_0_fp_val, // @[rename-stage.scala:60:14] input io_com_uops_0_fp_single, // @[rename-stage.scala:60:14] input io_com_uops_0_xcpt_pf_if, // @[rename-stage.scala:60:14] input io_com_uops_0_xcpt_ae_if, // @[rename-stage.scala:60:14] input io_com_uops_0_xcpt_ma_if, // @[rename-stage.scala:60:14] input io_com_uops_0_bp_debug_if, // @[rename-stage.scala:60:14] input io_com_uops_0_bp_xcpt_if, // @[rename-stage.scala:60:14] input [1:0] io_com_uops_0_debug_fsrc, // @[rename-stage.scala:60:14] input [1:0] io_com_uops_0_debug_tsrc, // @[rename-stage.scala:60:14] input io_rbk_valids_0, // @[rename-stage.scala:60:14] input io_rollback, // @[rename-stage.scala:60:14] input io_debug_rob_empty // @[rename-stage.scala:60:14] ); wire [5:0] r_uop_bypassed_uop_stale_pdst; // @[rename-stage.scala:174:28] wire r_uop_bypassed_uop_prs2_busy; // @[rename-stage.scala:174:28] wire r_uop_bypassed_uop_prs1_busy; // @[rename-stage.scala:174:28] wire [5:0] r_uop_bypassed_uop_prs2; // @[rename-stage.scala:174:28] wire [5:0] r_uop_bypassed_uop_prs1; // @[rename-stage.scala:174:28] wire [1:0] next_uop_debug_tsrc; // @[rename-stage.scala:123:24] wire [1:0] next_uop_debug_fsrc; // @[rename-stage.scala:123:24] wire next_uop_bp_xcpt_if; // @[rename-stage.scala:123:24] wire next_uop_bp_debug_if; // @[rename-stage.scala:123:24] wire next_uop_xcpt_ma_if; // @[rename-stage.scala:123:24] wire next_uop_xcpt_ae_if; // @[rename-stage.scala:123:24] wire next_uop_xcpt_pf_if; // @[rename-stage.scala:123:24] wire next_uop_fp_single; // @[rename-stage.scala:123:24] wire next_uop_fp_val; // @[rename-stage.scala:123:24] wire next_uop_frs3_en; // @[rename-stage.scala:123:24] wire [1:0] next_uop_lrs2_rtype; // @[rename-stage.scala:123:24] wire [1:0] next_uop_lrs1_rtype; // @[rename-stage.scala:123:24] wire [1:0] next_uop_dst_rtype; // @[rename-stage.scala:123:24] wire next_uop_ldst_val; // @[rename-stage.scala:123:24] wire [5:0] next_uop_lrs3; // @[rename-stage.scala:123:24] wire [5:0] next_uop_lrs2; // @[rename-stage.scala:123:24] wire [5:0] next_uop_lrs1; // @[rename-stage.scala:123:24] wire [5:0] next_uop_ldst; // @[rename-stage.scala:123:24] wire next_uop_ldst_is_rs1; // @[rename-stage.scala:123:24] wire next_uop_flush_on_commit; // @[rename-stage.scala:123:24] wire next_uop_is_unique; // @[rename-stage.scala:123:24] wire next_uop_is_sys_pc2epc; // @[rename-stage.scala:123:24] wire next_uop_uses_stq; // @[rename-stage.scala:123:24] wire next_uop_uses_ldq; // @[rename-stage.scala:123:24] wire next_uop_is_amo; // @[rename-stage.scala:123:24] wire next_uop_is_fencei; // @[rename-stage.scala:123:24] wire next_uop_is_fence; // @[rename-stage.scala:123:24] wire next_uop_mem_signed; // @[rename-stage.scala:123:24] wire [1:0] next_uop_mem_size; // 
@[rename-stage.scala:123:24] wire [4:0] next_uop_mem_cmd; // @[rename-stage.scala:123:24] wire next_uop_bypassable; // @[rename-stage.scala:123:24] wire [63:0] next_uop_exc_cause; // @[rename-stage.scala:123:24] wire next_uop_exception; // @[rename-stage.scala:123:24] wire next_uop_ppred_busy; // @[rename-stage.scala:123:24] wire [3:0] next_uop_ppred; // @[rename-stage.scala:123:24] wire [5:0] next_uop_pdst; // @[rename-stage.scala:123:24] wire [1:0] next_uop_rxq_idx; // @[rename-stage.scala:123:24] wire [2:0] next_uop_stq_idx; // @[rename-stage.scala:123:24] wire [2:0] next_uop_ldq_idx; // @[rename-stage.scala:123:24] wire [4:0] next_uop_rob_idx; // @[rename-stage.scala:123:24] wire [11:0] next_uop_csr_addr; // @[rename-stage.scala:123:24] wire [19:0] next_uop_imm_packed; // @[rename-stage.scala:123:24] wire next_uop_taken; // @[rename-stage.scala:123:24] wire [5:0] next_uop_pc_lob; // @[rename-stage.scala:123:24] wire next_uop_edge_inst; // @[rename-stage.scala:123:24] wire [3:0] next_uop_ftq_idx; // @[rename-stage.scala:123:24] wire [2:0] next_uop_br_tag; // @[rename-stage.scala:123:24] wire [7:0] next_uop_br_mask; // @[rename-stage.scala:123:24] wire next_uop_is_sfb; // @[rename-stage.scala:123:24] wire next_uop_is_jal; // @[rename-stage.scala:123:24] wire next_uop_is_jalr; // @[rename-stage.scala:123:24] wire next_uop_is_br; // @[rename-stage.scala:123:24] wire next_uop_iw_p2_poisoned; // @[rename-stage.scala:123:24] wire next_uop_iw_p1_poisoned; // @[rename-stage.scala:123:24] wire [1:0] next_uop_iw_state; // @[rename-stage.scala:123:24] wire [9:0] next_uop_fu_code; // @[rename-stage.scala:123:24] wire [2:0] next_uop_iq_type; // @[rename-stage.scala:123:24] wire [39:0] next_uop_debug_pc; // @[rename-stage.scala:123:24] wire next_uop_is_rvc; // @[rename-stage.scala:123:24] wire [31:0] next_uop_debug_inst; // @[rename-stage.scala:123:24] wire [31:0] next_uop_inst; // @[rename-stage.scala:123:24] wire [6:0] next_uop_uopc; // @[rename-stage.scala:123:24] wire next_uop_ctrl_is_std; // @[rename-stage.scala:123:24] wire next_uop_ctrl_is_sta; // @[rename-stage.scala:123:24] wire next_uop_ctrl_is_load; // @[rename-stage.scala:123:24] wire [2:0] next_uop_ctrl_csr_cmd; // @[rename-stage.scala:123:24] wire next_uop_ctrl_fcn_dw; // @[rename-stage.scala:123:24] wire [4:0] next_uop_ctrl_op_fcn; // @[rename-stage.scala:123:24] wire [2:0] next_uop_ctrl_imm_sel; // @[rename-stage.scala:123:24] wire [2:0] next_uop_ctrl_op2_sel; // @[rename-stage.scala:123:24] wire [1:0] next_uop_ctrl_op1_sel; // @[rename-stage.scala:123:24] wire [3:0] next_uop_ctrl_br_type; // @[rename-stage.scala:123:24] wire [1:0] ren2_uops_0_debug_tsrc; // @[rename-stage.scala:108:29] wire [1:0] ren2_uops_0_debug_fsrc; // @[rename-stage.scala:108:29] wire ren2_uops_0_bp_xcpt_if; // @[rename-stage.scala:108:29] wire ren2_uops_0_bp_debug_if; // @[rename-stage.scala:108:29] wire ren2_uops_0_xcpt_ma_if; // @[rename-stage.scala:108:29] wire ren2_uops_0_xcpt_ae_if; // @[rename-stage.scala:108:29] wire ren2_uops_0_xcpt_pf_if; // @[rename-stage.scala:108:29] wire ren2_uops_0_fp_single; // @[rename-stage.scala:108:29] wire ren2_uops_0_fp_val; // @[rename-stage.scala:108:29] wire ren2_uops_0_frs3_en; // @[rename-stage.scala:108:29] wire [1:0] ren2_uops_0_lrs2_rtype; // @[rename-stage.scala:108:29] wire [1:0] ren2_uops_0_lrs1_rtype; // @[rename-stage.scala:108:29] wire [1:0] ren2_uops_0_dst_rtype; // @[rename-stage.scala:108:29] wire ren2_uops_0_ldst_val; // @[rename-stage.scala:108:29] wire [5:0] ren2_uops_0_lrs3; // 
@[rename-stage.scala:108:29] wire [5:0] ren2_uops_0_lrs2; // @[rename-stage.scala:108:29] wire [5:0] ren2_uops_0_lrs1; // @[rename-stage.scala:108:29] wire [5:0] ren2_uops_0_ldst; // @[rename-stage.scala:108:29] wire ren2_uops_0_ldst_is_rs1; // @[rename-stage.scala:108:29] wire ren2_uops_0_flush_on_commit; // @[rename-stage.scala:108:29] wire ren2_uops_0_is_unique; // @[rename-stage.scala:108:29] wire ren2_uops_0_is_sys_pc2epc; // @[rename-stage.scala:108:29] wire ren2_uops_0_uses_stq; // @[rename-stage.scala:108:29] wire ren2_uops_0_uses_ldq; // @[rename-stage.scala:108:29] wire ren2_uops_0_is_amo; // @[rename-stage.scala:108:29] wire ren2_uops_0_is_fencei; // @[rename-stage.scala:108:29] wire ren2_uops_0_is_fence; // @[rename-stage.scala:108:29] wire ren2_uops_0_mem_signed; // @[rename-stage.scala:108:29] wire [1:0] ren2_uops_0_mem_size; // @[rename-stage.scala:108:29] wire [4:0] ren2_uops_0_mem_cmd; // @[rename-stage.scala:108:29] wire ren2_uops_0_bypassable; // @[rename-stage.scala:108:29] wire [63:0] ren2_uops_0_exc_cause; // @[rename-stage.scala:108:29] wire ren2_uops_0_exception; // @[rename-stage.scala:108:29] wire [5:0] ren2_uops_0_stale_pdst; // @[rename-stage.scala:108:29] wire ren2_uops_0_ppred_busy; // @[rename-stage.scala:108:29] wire ren2_uops_0_prs2_busy; // @[rename-stage.scala:108:29] wire ren2_uops_0_prs1_busy; // @[rename-stage.scala:108:29] wire [3:0] ren2_uops_0_ppred; // @[rename-stage.scala:108:29] wire [5:0] ren2_uops_0_prs2; // @[rename-stage.scala:108:29] wire [5:0] ren2_uops_0_prs1; // @[rename-stage.scala:108:29] wire [5:0] ren2_uops_0_pdst; // @[rename-stage.scala:108:29] wire [1:0] ren2_uops_0_rxq_idx; // @[rename-stage.scala:108:29] wire [2:0] ren2_uops_0_stq_idx; // @[rename-stage.scala:108:29] wire [2:0] ren2_uops_0_ldq_idx; // @[rename-stage.scala:108:29] wire [4:0] ren2_uops_0_rob_idx; // @[rename-stage.scala:108:29] wire [11:0] ren2_uops_0_csr_addr; // @[rename-stage.scala:108:29] wire [19:0] ren2_uops_0_imm_packed; // @[rename-stage.scala:108:29] wire ren2_uops_0_taken; // @[rename-stage.scala:108:29] wire [5:0] ren2_uops_0_pc_lob; // @[rename-stage.scala:108:29] wire ren2_uops_0_edge_inst; // @[rename-stage.scala:108:29] wire [3:0] ren2_uops_0_ftq_idx; // @[rename-stage.scala:108:29] wire [2:0] ren2_uops_0_br_tag; // @[rename-stage.scala:108:29] wire [7:0] ren2_uops_0_br_mask; // @[rename-stage.scala:108:29] wire ren2_uops_0_is_sfb; // @[rename-stage.scala:108:29] wire ren2_uops_0_is_jal; // @[rename-stage.scala:108:29] wire ren2_uops_0_is_jalr; // @[rename-stage.scala:108:29] wire ren2_uops_0_is_br; // @[rename-stage.scala:108:29] wire ren2_uops_0_iw_p2_poisoned; // @[rename-stage.scala:108:29] wire ren2_uops_0_iw_p1_poisoned; // @[rename-stage.scala:108:29] wire [1:0] ren2_uops_0_iw_state; // @[rename-stage.scala:108:29] wire [9:0] ren2_uops_0_fu_code; // @[rename-stage.scala:108:29] wire [2:0] ren2_uops_0_iq_type; // @[rename-stage.scala:108:29] wire [39:0] ren2_uops_0_debug_pc; // @[rename-stage.scala:108:29] wire ren2_uops_0_is_rvc; // @[rename-stage.scala:108:29] wire [31:0] ren2_uops_0_debug_inst; // @[rename-stage.scala:108:29] wire [31:0] ren2_uops_0_inst; // @[rename-stage.scala:108:29] wire [6:0] ren2_uops_0_uopc; // @[rename-stage.scala:108:29] wire ren2_uops_0_ctrl_is_std; // @[rename-stage.scala:108:29] wire ren2_uops_0_ctrl_is_sta; // @[rename-stage.scala:108:29] wire ren2_uops_0_ctrl_is_load; // @[rename-stage.scala:108:29] wire [2:0] ren2_uops_0_ctrl_csr_cmd; // @[rename-stage.scala:108:29] wire ren2_uops_0_ctrl_fcn_dw; // 
@[rename-stage.scala:108:29] wire [4:0] ren2_uops_0_ctrl_op_fcn; // @[rename-stage.scala:108:29] wire [2:0] ren2_uops_0_ctrl_imm_sel; // @[rename-stage.scala:108:29] wire [2:0] ren2_uops_0_ctrl_op2_sel; // @[rename-stage.scala:108:29] wire [1:0] ren2_uops_0_ctrl_op1_sel; // @[rename-stage.scala:108:29] wire [3:0] ren2_uops_0_ctrl_br_type; // @[rename-stage.scala:108:29] wire _busytable_io_busy_resps_0_prs1_busy; // @[rename-stage.scala:224:25] wire _busytable_io_busy_resps_0_prs2_busy; // @[rename-stage.scala:224:25] wire _freelist_io_alloc_pregs_0_valid; // @[rename-stage.scala:220:24] wire [5:0] _freelist_io_alloc_pregs_0_bits; // @[rename-stage.scala:220:24] wire io_kill_0 = io_kill; // @[rename-stage.scala:160:7] wire io_dec_fire_0_0 = io_dec_fire_0; // @[rename-stage.scala:160:7] wire [6:0] io_dec_uops_0_uopc_0 = io_dec_uops_0_uopc; // @[rename-stage.scala:160:7] wire [31:0] io_dec_uops_0_inst_0 = io_dec_uops_0_inst; // @[rename-stage.scala:160:7] wire [31:0] io_dec_uops_0_debug_inst_0 = io_dec_uops_0_debug_inst; // @[rename-stage.scala:160:7] wire io_dec_uops_0_is_rvc_0 = io_dec_uops_0_is_rvc; // @[rename-stage.scala:160:7] wire [39:0] io_dec_uops_0_debug_pc_0 = io_dec_uops_0_debug_pc; // @[rename-stage.scala:160:7] wire [2:0] io_dec_uops_0_iq_type_0 = io_dec_uops_0_iq_type; // @[rename-stage.scala:160:7] wire [9:0] io_dec_uops_0_fu_code_0 = io_dec_uops_0_fu_code; // @[rename-stage.scala:160:7] wire io_dec_uops_0_is_br_0 = io_dec_uops_0_is_br; // @[rename-stage.scala:160:7] wire io_dec_uops_0_is_jalr_0 = io_dec_uops_0_is_jalr; // @[rename-stage.scala:160:7] wire io_dec_uops_0_is_jal_0 = io_dec_uops_0_is_jal; // @[rename-stage.scala:160:7] wire io_dec_uops_0_is_sfb_0 = io_dec_uops_0_is_sfb; // @[rename-stage.scala:160:7] wire [7:0] io_dec_uops_0_br_mask_0 = io_dec_uops_0_br_mask; // @[rename-stage.scala:160:7] wire [2:0] io_dec_uops_0_br_tag_0 = io_dec_uops_0_br_tag; // @[rename-stage.scala:160:7] wire [3:0] io_dec_uops_0_ftq_idx_0 = io_dec_uops_0_ftq_idx; // @[rename-stage.scala:160:7] wire io_dec_uops_0_edge_inst_0 = io_dec_uops_0_edge_inst; // @[rename-stage.scala:160:7] wire [5:0] io_dec_uops_0_pc_lob_0 = io_dec_uops_0_pc_lob; // @[rename-stage.scala:160:7] wire io_dec_uops_0_taken_0 = io_dec_uops_0_taken; // @[rename-stage.scala:160:7] wire [19:0] io_dec_uops_0_imm_packed_0 = io_dec_uops_0_imm_packed; // @[rename-stage.scala:160:7] wire io_dec_uops_0_exception_0 = io_dec_uops_0_exception; // @[rename-stage.scala:160:7] wire [63:0] io_dec_uops_0_exc_cause_0 = io_dec_uops_0_exc_cause; // @[rename-stage.scala:160:7] wire io_dec_uops_0_bypassable_0 = io_dec_uops_0_bypassable; // @[rename-stage.scala:160:7] wire [4:0] io_dec_uops_0_mem_cmd_0 = io_dec_uops_0_mem_cmd; // @[rename-stage.scala:160:7] wire [1:0] io_dec_uops_0_mem_size_0 = io_dec_uops_0_mem_size; // @[rename-stage.scala:160:7] wire io_dec_uops_0_mem_signed_0 = io_dec_uops_0_mem_signed; // @[rename-stage.scala:160:7] wire io_dec_uops_0_is_fence_0 = io_dec_uops_0_is_fence; // @[rename-stage.scala:160:7] wire io_dec_uops_0_is_fencei_0 = io_dec_uops_0_is_fencei; // @[rename-stage.scala:160:7] wire io_dec_uops_0_is_amo_0 = io_dec_uops_0_is_amo; // @[rename-stage.scala:160:7] wire io_dec_uops_0_uses_ldq_0 = io_dec_uops_0_uses_ldq; // @[rename-stage.scala:160:7] wire io_dec_uops_0_uses_stq_0 = io_dec_uops_0_uses_stq; // @[rename-stage.scala:160:7] wire io_dec_uops_0_is_sys_pc2epc_0 = io_dec_uops_0_is_sys_pc2epc; // @[rename-stage.scala:160:7] wire io_dec_uops_0_is_unique_0 = io_dec_uops_0_is_unique; // @[rename-stage.scala:160:7] 
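// wire <name>_0 = <name>: pass-through aliases of the module inputs emitted by the generator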
wire io_dec_uops_0_flush_on_commit_0 = io_dec_uops_0_flush_on_commit; // @[rename-stage.scala:160:7] wire [5:0] io_dec_uops_0_ldst_0 = io_dec_uops_0_ldst; // @[rename-stage.scala:160:7] wire [5:0] io_dec_uops_0_lrs1_0 = io_dec_uops_0_lrs1; // @[rename-stage.scala:160:7] wire [5:0] io_dec_uops_0_lrs2_0 = io_dec_uops_0_lrs2; // @[rename-stage.scala:160:7] wire [5:0] io_dec_uops_0_lrs3_0 = io_dec_uops_0_lrs3; // @[rename-stage.scala:160:7] wire io_dec_uops_0_ldst_val_0 = io_dec_uops_0_ldst_val; // @[rename-stage.scala:160:7] wire [1:0] io_dec_uops_0_dst_rtype_0 = io_dec_uops_0_dst_rtype; // @[rename-stage.scala:160:7] wire [1:0] io_dec_uops_0_lrs1_rtype_0 = io_dec_uops_0_lrs1_rtype; // @[rename-stage.scala:160:7] wire [1:0] io_dec_uops_0_lrs2_rtype_0 = io_dec_uops_0_lrs2_rtype; // @[rename-stage.scala:160:7] wire io_dec_uops_0_frs3_en_0 = io_dec_uops_0_frs3_en; // @[rename-stage.scala:160:7] wire io_dec_uops_0_fp_val_0 = io_dec_uops_0_fp_val; // @[rename-stage.scala:160:7] wire io_dec_uops_0_fp_single_0 = io_dec_uops_0_fp_single; // @[rename-stage.scala:160:7] wire io_dec_uops_0_xcpt_pf_if_0 = io_dec_uops_0_xcpt_pf_if; // @[rename-stage.scala:160:7] wire io_dec_uops_0_xcpt_ae_if_0 = io_dec_uops_0_xcpt_ae_if; // @[rename-stage.scala:160:7] wire io_dec_uops_0_bp_debug_if_0 = io_dec_uops_0_bp_debug_if; // @[rename-stage.scala:160:7] wire io_dec_uops_0_bp_xcpt_if_0 = io_dec_uops_0_bp_xcpt_if; // @[rename-stage.scala:160:7] wire [1:0] io_dec_uops_0_debug_fsrc_0 = io_dec_uops_0_debug_fsrc; // @[rename-stage.scala:160:7] wire [7:0] io_brupdate_b1_resolve_mask_0 = io_brupdate_b1_resolve_mask; // @[rename-stage.scala:160:7] wire [7:0] io_brupdate_b1_mispredict_mask_0 = io_brupdate_b1_mispredict_mask; // @[rename-stage.scala:160:7] wire [6:0] io_brupdate_b2_uop_uopc_0 = io_brupdate_b2_uop_uopc; // @[rename-stage.scala:160:7] wire [31:0] io_brupdate_b2_uop_inst_0 = io_brupdate_b2_uop_inst; // @[rename-stage.scala:160:7] wire [31:0] io_brupdate_b2_uop_debug_inst_0 = io_brupdate_b2_uop_debug_inst; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_is_rvc_0 = io_brupdate_b2_uop_is_rvc; // @[rename-stage.scala:160:7] wire [39:0] io_brupdate_b2_uop_debug_pc_0 = io_brupdate_b2_uop_debug_pc; // @[rename-stage.scala:160:7] wire [2:0] io_brupdate_b2_uop_iq_type_0 = io_brupdate_b2_uop_iq_type; // @[rename-stage.scala:160:7] wire [9:0] io_brupdate_b2_uop_fu_code_0 = io_brupdate_b2_uop_fu_code; // @[rename-stage.scala:160:7] wire [3:0] io_brupdate_b2_uop_ctrl_br_type_0 = io_brupdate_b2_uop_ctrl_br_type; // @[rename-stage.scala:160:7] wire [1:0] io_brupdate_b2_uop_ctrl_op1_sel_0 = io_brupdate_b2_uop_ctrl_op1_sel; // @[rename-stage.scala:160:7] wire [2:0] io_brupdate_b2_uop_ctrl_op2_sel_0 = io_brupdate_b2_uop_ctrl_op2_sel; // @[rename-stage.scala:160:7] wire [2:0] io_brupdate_b2_uop_ctrl_imm_sel_0 = io_brupdate_b2_uop_ctrl_imm_sel; // @[rename-stage.scala:160:7] wire [4:0] io_brupdate_b2_uop_ctrl_op_fcn_0 = io_brupdate_b2_uop_ctrl_op_fcn; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_ctrl_fcn_dw_0 = io_brupdate_b2_uop_ctrl_fcn_dw; // @[rename-stage.scala:160:7] wire [2:0] io_brupdate_b2_uop_ctrl_csr_cmd_0 = io_brupdate_b2_uop_ctrl_csr_cmd; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_ctrl_is_load_0 = io_brupdate_b2_uop_ctrl_is_load; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_ctrl_is_sta_0 = io_brupdate_b2_uop_ctrl_is_sta; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_ctrl_is_std_0 = io_brupdate_b2_uop_ctrl_is_std; // @[rename-stage.scala:160:7] wire [1:0] 
io_brupdate_b2_uop_iw_state_0 = io_brupdate_b2_uop_iw_state; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_iw_p1_poisoned_0 = io_brupdate_b2_uop_iw_p1_poisoned; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_iw_p2_poisoned_0 = io_brupdate_b2_uop_iw_p2_poisoned; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_is_br_0 = io_brupdate_b2_uop_is_br; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_is_jalr_0 = io_brupdate_b2_uop_is_jalr; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_is_jal_0 = io_brupdate_b2_uop_is_jal; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_is_sfb_0 = io_brupdate_b2_uop_is_sfb; // @[rename-stage.scala:160:7] wire [7:0] io_brupdate_b2_uop_br_mask_0 = io_brupdate_b2_uop_br_mask; // @[rename-stage.scala:160:7] wire [2:0] io_brupdate_b2_uop_br_tag_0 = io_brupdate_b2_uop_br_tag; // @[rename-stage.scala:160:7] wire [3:0] io_brupdate_b2_uop_ftq_idx_0 = io_brupdate_b2_uop_ftq_idx; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_edge_inst_0 = io_brupdate_b2_uop_edge_inst; // @[rename-stage.scala:160:7] wire [5:0] io_brupdate_b2_uop_pc_lob_0 = io_brupdate_b2_uop_pc_lob; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_taken_0 = io_brupdate_b2_uop_taken; // @[rename-stage.scala:160:7] wire [19:0] io_brupdate_b2_uop_imm_packed_0 = io_brupdate_b2_uop_imm_packed; // @[rename-stage.scala:160:7] wire [11:0] io_brupdate_b2_uop_csr_addr_0 = io_brupdate_b2_uop_csr_addr; // @[rename-stage.scala:160:7] wire [4:0] io_brupdate_b2_uop_rob_idx_0 = io_brupdate_b2_uop_rob_idx; // @[rename-stage.scala:160:7] wire [2:0] io_brupdate_b2_uop_ldq_idx_0 = io_brupdate_b2_uop_ldq_idx; // @[rename-stage.scala:160:7] wire [2:0] io_brupdate_b2_uop_stq_idx_0 = io_brupdate_b2_uop_stq_idx; // @[rename-stage.scala:160:7] wire [1:0] io_brupdate_b2_uop_rxq_idx_0 = io_brupdate_b2_uop_rxq_idx; // @[rename-stage.scala:160:7] wire [5:0] io_brupdate_b2_uop_pdst_0 = io_brupdate_b2_uop_pdst; // @[rename-stage.scala:160:7] wire [5:0] io_brupdate_b2_uop_prs1_0 = io_brupdate_b2_uop_prs1; // @[rename-stage.scala:160:7] wire [5:0] io_brupdate_b2_uop_prs2_0 = io_brupdate_b2_uop_prs2; // @[rename-stage.scala:160:7] wire [5:0] io_brupdate_b2_uop_prs3_0 = io_brupdate_b2_uop_prs3; // @[rename-stage.scala:160:7] wire [3:0] io_brupdate_b2_uop_ppred_0 = io_brupdate_b2_uop_ppred; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_prs1_busy_0 = io_brupdate_b2_uop_prs1_busy; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_prs2_busy_0 = io_brupdate_b2_uop_prs2_busy; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_prs3_busy_0 = io_brupdate_b2_uop_prs3_busy; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_ppred_busy_0 = io_brupdate_b2_uop_ppred_busy; // @[rename-stage.scala:160:7] wire [5:0] io_brupdate_b2_uop_stale_pdst_0 = io_brupdate_b2_uop_stale_pdst; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_exception_0 = io_brupdate_b2_uop_exception; // @[rename-stage.scala:160:7] wire [63:0] io_brupdate_b2_uop_exc_cause_0 = io_brupdate_b2_uop_exc_cause; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_bypassable_0 = io_brupdate_b2_uop_bypassable; // @[rename-stage.scala:160:7] wire [4:0] io_brupdate_b2_uop_mem_cmd_0 = io_brupdate_b2_uop_mem_cmd; // @[rename-stage.scala:160:7] wire [1:0] io_brupdate_b2_uop_mem_size_0 = io_brupdate_b2_uop_mem_size; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_mem_signed_0 = io_brupdate_b2_uop_mem_signed; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_is_fence_0 = io_brupdate_b2_uop_is_fence; // 
@[rename-stage.scala:160:7] wire io_brupdate_b2_uop_is_fencei_0 = io_brupdate_b2_uop_is_fencei; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_is_amo_0 = io_brupdate_b2_uop_is_amo; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_uses_ldq_0 = io_brupdate_b2_uop_uses_ldq; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_uses_stq_0 = io_brupdate_b2_uop_uses_stq; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_is_sys_pc2epc_0 = io_brupdate_b2_uop_is_sys_pc2epc; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_is_unique_0 = io_brupdate_b2_uop_is_unique; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_flush_on_commit_0 = io_brupdate_b2_uop_flush_on_commit; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_ldst_is_rs1_0 = io_brupdate_b2_uop_ldst_is_rs1; // @[rename-stage.scala:160:7] wire [5:0] io_brupdate_b2_uop_ldst_0 = io_brupdate_b2_uop_ldst; // @[rename-stage.scala:160:7] wire [5:0] io_brupdate_b2_uop_lrs1_0 = io_brupdate_b2_uop_lrs1; // @[rename-stage.scala:160:7] wire [5:0] io_brupdate_b2_uop_lrs2_0 = io_brupdate_b2_uop_lrs2; // @[rename-stage.scala:160:7] wire [5:0] io_brupdate_b2_uop_lrs3_0 = io_brupdate_b2_uop_lrs3; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_ldst_val_0 = io_brupdate_b2_uop_ldst_val; // @[rename-stage.scala:160:7] wire [1:0] io_brupdate_b2_uop_dst_rtype_0 = io_brupdate_b2_uop_dst_rtype; // @[rename-stage.scala:160:7] wire [1:0] io_brupdate_b2_uop_lrs1_rtype_0 = io_brupdate_b2_uop_lrs1_rtype; // @[rename-stage.scala:160:7] wire [1:0] io_brupdate_b2_uop_lrs2_rtype_0 = io_brupdate_b2_uop_lrs2_rtype; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_frs3_en_0 = io_brupdate_b2_uop_frs3_en; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_fp_val_0 = io_brupdate_b2_uop_fp_val; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_fp_single_0 = io_brupdate_b2_uop_fp_single; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_xcpt_pf_if_0 = io_brupdate_b2_uop_xcpt_pf_if; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_xcpt_ae_if_0 = io_brupdate_b2_uop_xcpt_ae_if; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_xcpt_ma_if_0 = io_brupdate_b2_uop_xcpt_ma_if; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_bp_debug_if_0 = io_brupdate_b2_uop_bp_debug_if; // @[rename-stage.scala:160:7] wire io_brupdate_b2_uop_bp_xcpt_if_0 = io_brupdate_b2_uop_bp_xcpt_if; // @[rename-stage.scala:160:7] wire [1:0] io_brupdate_b2_uop_debug_fsrc_0 = io_brupdate_b2_uop_debug_fsrc; // @[rename-stage.scala:160:7] wire [1:0] io_brupdate_b2_uop_debug_tsrc_0 = io_brupdate_b2_uop_debug_tsrc; // @[rename-stage.scala:160:7] wire io_brupdate_b2_valid_0 = io_brupdate_b2_valid; // @[rename-stage.scala:160:7] wire io_brupdate_b2_mispredict_0 = io_brupdate_b2_mispredict; // @[rename-stage.scala:160:7] wire io_brupdate_b2_taken_0 = io_brupdate_b2_taken; // @[rename-stage.scala:160:7] wire [2:0] io_brupdate_b2_cfi_type_0 = io_brupdate_b2_cfi_type; // @[rename-stage.scala:160:7] wire [1:0] io_brupdate_b2_pc_sel_0 = io_brupdate_b2_pc_sel; // @[rename-stage.scala:160:7] wire [39:0] io_brupdate_b2_jalr_target_0 = io_brupdate_b2_jalr_target; // @[rename-stage.scala:160:7] wire [20:0] io_brupdate_b2_target_offset_0 = io_brupdate_b2_target_offset; // @[rename-stage.scala:160:7] wire io_dis_fire_0_0 = io_dis_fire_0; // @[rename-stage.scala:160:7] wire io_dis_ready_0 = io_dis_ready; // @[rename-stage.scala:160:7] wire io_wakeups_0_valid_0 = io_wakeups_0_valid; // @[rename-stage.scala:160:7] wire [6:0] io_wakeups_0_bits_uop_uopc_0 = 
io_wakeups_0_bits_uop_uopc; // @[rename-stage.scala:160:7] wire [31:0] io_wakeups_0_bits_uop_inst_0 = io_wakeups_0_bits_uop_inst; // @[rename-stage.scala:160:7] wire [31:0] io_wakeups_0_bits_uop_debug_inst_0 = io_wakeups_0_bits_uop_debug_inst; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_is_rvc_0 = io_wakeups_0_bits_uop_is_rvc; // @[rename-stage.scala:160:7] wire [39:0] io_wakeups_0_bits_uop_debug_pc_0 = io_wakeups_0_bits_uop_debug_pc; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_0_bits_uop_iq_type_0 = io_wakeups_0_bits_uop_iq_type; // @[rename-stage.scala:160:7] wire [9:0] io_wakeups_0_bits_uop_fu_code_0 = io_wakeups_0_bits_uop_fu_code; // @[rename-stage.scala:160:7] wire [3:0] io_wakeups_0_bits_uop_ctrl_br_type_0 = io_wakeups_0_bits_uop_ctrl_br_type; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_0_bits_uop_ctrl_op1_sel_0 = io_wakeups_0_bits_uop_ctrl_op1_sel; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_0_bits_uop_ctrl_op2_sel_0 = io_wakeups_0_bits_uop_ctrl_op2_sel; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_0_bits_uop_ctrl_imm_sel_0 = io_wakeups_0_bits_uop_ctrl_imm_sel; // @[rename-stage.scala:160:7] wire [4:0] io_wakeups_0_bits_uop_ctrl_op_fcn_0 = io_wakeups_0_bits_uop_ctrl_op_fcn; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_ctrl_fcn_dw_0 = io_wakeups_0_bits_uop_ctrl_fcn_dw; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_0_bits_uop_ctrl_csr_cmd_0 = io_wakeups_0_bits_uop_ctrl_csr_cmd; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_ctrl_is_load_0 = io_wakeups_0_bits_uop_ctrl_is_load; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_ctrl_is_sta_0 = io_wakeups_0_bits_uop_ctrl_is_sta; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_ctrl_is_std_0 = io_wakeups_0_bits_uop_ctrl_is_std; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_0_bits_uop_iw_state_0 = io_wakeups_0_bits_uop_iw_state; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_iw_p1_poisoned_0 = io_wakeups_0_bits_uop_iw_p1_poisoned; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_iw_p2_poisoned_0 = io_wakeups_0_bits_uop_iw_p2_poisoned; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_is_br_0 = io_wakeups_0_bits_uop_is_br; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_is_jalr_0 = io_wakeups_0_bits_uop_is_jalr; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_is_jal_0 = io_wakeups_0_bits_uop_is_jal; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_is_sfb_0 = io_wakeups_0_bits_uop_is_sfb; // @[rename-stage.scala:160:7] wire [7:0] io_wakeups_0_bits_uop_br_mask_0 = io_wakeups_0_bits_uop_br_mask; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_0_bits_uop_br_tag_0 = io_wakeups_0_bits_uop_br_tag; // @[rename-stage.scala:160:7] wire [3:0] io_wakeups_0_bits_uop_ftq_idx_0 = io_wakeups_0_bits_uop_ftq_idx; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_edge_inst_0 = io_wakeups_0_bits_uop_edge_inst; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_0_bits_uop_pc_lob_0 = io_wakeups_0_bits_uop_pc_lob; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_taken_0 = io_wakeups_0_bits_uop_taken; // @[rename-stage.scala:160:7] wire [19:0] io_wakeups_0_bits_uop_imm_packed_0 = io_wakeups_0_bits_uop_imm_packed; // @[rename-stage.scala:160:7] wire [11:0] io_wakeups_0_bits_uop_csr_addr_0 = io_wakeups_0_bits_uop_csr_addr; // @[rename-stage.scala:160:7] wire [4:0] io_wakeups_0_bits_uop_rob_idx_0 = io_wakeups_0_bits_uop_rob_idx; // @[rename-stage.scala:160:7] wire [2:0] 
io_wakeups_0_bits_uop_ldq_idx_0 = io_wakeups_0_bits_uop_ldq_idx; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_0_bits_uop_stq_idx_0 = io_wakeups_0_bits_uop_stq_idx; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_0_bits_uop_rxq_idx_0 = io_wakeups_0_bits_uop_rxq_idx; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_0_bits_uop_pdst_0 = io_wakeups_0_bits_uop_pdst; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_0_bits_uop_prs1_0 = io_wakeups_0_bits_uop_prs1; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_0_bits_uop_prs2_0 = io_wakeups_0_bits_uop_prs2; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_0_bits_uop_prs3_0 = io_wakeups_0_bits_uop_prs3; // @[rename-stage.scala:160:7] wire [3:0] io_wakeups_0_bits_uop_ppred_0 = io_wakeups_0_bits_uop_ppred; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_prs1_busy_0 = io_wakeups_0_bits_uop_prs1_busy; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_prs2_busy_0 = io_wakeups_0_bits_uop_prs2_busy; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_prs3_busy_0 = io_wakeups_0_bits_uop_prs3_busy; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_ppred_busy_0 = io_wakeups_0_bits_uop_ppred_busy; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_0_bits_uop_stale_pdst_0 = io_wakeups_0_bits_uop_stale_pdst; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_exception_0 = io_wakeups_0_bits_uop_exception; // @[rename-stage.scala:160:7] wire [63:0] io_wakeups_0_bits_uop_exc_cause_0 = io_wakeups_0_bits_uop_exc_cause; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_bypassable_0 = io_wakeups_0_bits_uop_bypassable; // @[rename-stage.scala:160:7] wire [4:0] io_wakeups_0_bits_uop_mem_cmd_0 = io_wakeups_0_bits_uop_mem_cmd; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_0_bits_uop_mem_size_0 = io_wakeups_0_bits_uop_mem_size; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_mem_signed_0 = io_wakeups_0_bits_uop_mem_signed; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_is_fence_0 = io_wakeups_0_bits_uop_is_fence; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_is_fencei_0 = io_wakeups_0_bits_uop_is_fencei; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_is_amo_0 = io_wakeups_0_bits_uop_is_amo; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_uses_ldq_0 = io_wakeups_0_bits_uop_uses_ldq; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_uses_stq_0 = io_wakeups_0_bits_uop_uses_stq; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_is_sys_pc2epc_0 = io_wakeups_0_bits_uop_is_sys_pc2epc; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_is_unique_0 = io_wakeups_0_bits_uop_is_unique; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_flush_on_commit_0 = io_wakeups_0_bits_uop_flush_on_commit; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_ldst_is_rs1_0 = io_wakeups_0_bits_uop_ldst_is_rs1; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_0_bits_uop_ldst_0 = io_wakeups_0_bits_uop_ldst; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_0_bits_uop_lrs1_0 = io_wakeups_0_bits_uop_lrs1; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_0_bits_uop_lrs2_0 = io_wakeups_0_bits_uop_lrs2; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_0_bits_uop_lrs3_0 = io_wakeups_0_bits_uop_lrs3; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_ldst_val_0 = io_wakeups_0_bits_uop_ldst_val; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_0_bits_uop_dst_rtype_0 = io_wakeups_0_bits_uop_dst_rtype; // 
@[rename-stage.scala:160:7] wire [1:0] io_wakeups_0_bits_uop_lrs1_rtype_0 = io_wakeups_0_bits_uop_lrs1_rtype; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_0_bits_uop_lrs2_rtype_0 = io_wakeups_0_bits_uop_lrs2_rtype; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_frs3_en_0 = io_wakeups_0_bits_uop_frs3_en; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_fp_val_0 = io_wakeups_0_bits_uop_fp_val; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_fp_single_0 = io_wakeups_0_bits_uop_fp_single; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_xcpt_pf_if_0 = io_wakeups_0_bits_uop_xcpt_pf_if; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_xcpt_ae_if_0 = io_wakeups_0_bits_uop_xcpt_ae_if; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_xcpt_ma_if_0 = io_wakeups_0_bits_uop_xcpt_ma_if; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_bp_debug_if_0 = io_wakeups_0_bits_uop_bp_debug_if; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_uop_bp_xcpt_if_0 = io_wakeups_0_bits_uop_bp_xcpt_if; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_0_bits_uop_debug_fsrc_0 = io_wakeups_0_bits_uop_debug_fsrc; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_0_bits_uop_debug_tsrc_0 = io_wakeups_0_bits_uop_debug_tsrc; // @[rename-stage.scala:160:7] wire [63:0] io_wakeups_0_bits_data_0 = io_wakeups_0_bits_data; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_predicated_0 = io_wakeups_0_bits_predicated; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_valid_0 = io_wakeups_0_bits_fflags_valid; // @[rename-stage.scala:160:7] wire [6:0] io_wakeups_0_bits_fflags_bits_uop_uopc_0 = io_wakeups_0_bits_fflags_bits_uop_uopc; // @[rename-stage.scala:160:7] wire [31:0] io_wakeups_0_bits_fflags_bits_uop_inst_0 = io_wakeups_0_bits_fflags_bits_uop_inst; // @[rename-stage.scala:160:7] wire [31:0] io_wakeups_0_bits_fflags_bits_uop_debug_inst_0 = io_wakeups_0_bits_fflags_bits_uop_debug_inst; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_is_rvc_0 = io_wakeups_0_bits_fflags_bits_uop_is_rvc; // @[rename-stage.scala:160:7] wire [39:0] io_wakeups_0_bits_fflags_bits_uop_debug_pc_0 = io_wakeups_0_bits_fflags_bits_uop_debug_pc; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_0_bits_fflags_bits_uop_iq_type_0 = io_wakeups_0_bits_fflags_bits_uop_iq_type; // @[rename-stage.scala:160:7] wire [9:0] io_wakeups_0_bits_fflags_bits_uop_fu_code_0 = io_wakeups_0_bits_fflags_bits_uop_fu_code; // @[rename-stage.scala:160:7] wire [3:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_br_type_0 = io_wakeups_0_bits_fflags_bits_uop_ctrl_br_type; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_op1_sel_0 = io_wakeups_0_bits_fflags_bits_uop_ctrl_op1_sel; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_op2_sel_0 = io_wakeups_0_bits_fflags_bits_uop_ctrl_op2_sel; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_imm_sel_0 = io_wakeups_0_bits_fflags_bits_uop_ctrl_imm_sel; // @[rename-stage.scala:160:7] wire [4:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_op_fcn_0 = io_wakeups_0_bits_fflags_bits_uop_ctrl_op_fcn; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_ctrl_fcn_dw_0 = io_wakeups_0_bits_fflags_bits_uop_ctrl_fcn_dw; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_csr_cmd_0 = io_wakeups_0_bits_fflags_bits_uop_ctrl_csr_cmd; // @[rename-stage.scala:160:7] wire 
io_wakeups_0_bits_fflags_bits_uop_ctrl_is_load_0 = io_wakeups_0_bits_fflags_bits_uop_ctrl_is_load; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_ctrl_is_sta_0 = io_wakeups_0_bits_fflags_bits_uop_ctrl_is_sta; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_ctrl_is_std_0 = io_wakeups_0_bits_fflags_bits_uop_ctrl_is_std; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_0_bits_fflags_bits_uop_iw_state_0 = io_wakeups_0_bits_fflags_bits_uop_iw_state; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_iw_p1_poisoned_0 = io_wakeups_0_bits_fflags_bits_uop_iw_p1_poisoned; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_iw_p2_poisoned_0 = io_wakeups_0_bits_fflags_bits_uop_iw_p2_poisoned; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_is_br_0 = io_wakeups_0_bits_fflags_bits_uop_is_br; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_is_jalr_0 = io_wakeups_0_bits_fflags_bits_uop_is_jalr; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_is_jal_0 = io_wakeups_0_bits_fflags_bits_uop_is_jal; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_is_sfb_0 = io_wakeups_0_bits_fflags_bits_uop_is_sfb; // @[rename-stage.scala:160:7] wire [7:0] io_wakeups_0_bits_fflags_bits_uop_br_mask_0 = io_wakeups_0_bits_fflags_bits_uop_br_mask; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_0_bits_fflags_bits_uop_br_tag_0 = io_wakeups_0_bits_fflags_bits_uop_br_tag; // @[rename-stage.scala:160:7] wire [3:0] io_wakeups_0_bits_fflags_bits_uop_ftq_idx_0 = io_wakeups_0_bits_fflags_bits_uop_ftq_idx; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_edge_inst_0 = io_wakeups_0_bits_fflags_bits_uop_edge_inst; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_0_bits_fflags_bits_uop_pc_lob_0 = io_wakeups_0_bits_fflags_bits_uop_pc_lob; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_taken_0 = io_wakeups_0_bits_fflags_bits_uop_taken; // @[rename-stage.scala:160:7] wire [19:0] io_wakeups_0_bits_fflags_bits_uop_imm_packed_0 = io_wakeups_0_bits_fflags_bits_uop_imm_packed; // @[rename-stage.scala:160:7] wire [11:0] io_wakeups_0_bits_fflags_bits_uop_csr_addr_0 = io_wakeups_0_bits_fflags_bits_uop_csr_addr; // @[rename-stage.scala:160:7] wire [4:0] io_wakeups_0_bits_fflags_bits_uop_rob_idx_0 = io_wakeups_0_bits_fflags_bits_uop_rob_idx; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_0_bits_fflags_bits_uop_ldq_idx_0 = io_wakeups_0_bits_fflags_bits_uop_ldq_idx; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_0_bits_fflags_bits_uop_stq_idx_0 = io_wakeups_0_bits_fflags_bits_uop_stq_idx; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_0_bits_fflags_bits_uop_rxq_idx_0 = io_wakeups_0_bits_fflags_bits_uop_rxq_idx; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_0_bits_fflags_bits_uop_pdst_0 = io_wakeups_0_bits_fflags_bits_uop_pdst; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_0_bits_fflags_bits_uop_prs1_0 = io_wakeups_0_bits_fflags_bits_uop_prs1; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_0_bits_fflags_bits_uop_prs2_0 = io_wakeups_0_bits_fflags_bits_uop_prs2; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_0_bits_fflags_bits_uop_prs3_0 = io_wakeups_0_bits_fflags_bits_uop_prs3; // @[rename-stage.scala:160:7] wire [3:0] io_wakeups_0_bits_fflags_bits_uop_ppred_0 = io_wakeups_0_bits_fflags_bits_uop_ppred; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_prs1_busy_0 = 
io_wakeups_0_bits_fflags_bits_uop_prs1_busy; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_prs2_busy_0 = io_wakeups_0_bits_fflags_bits_uop_prs2_busy; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_prs3_busy_0 = io_wakeups_0_bits_fflags_bits_uop_prs3_busy; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_ppred_busy_0 = io_wakeups_0_bits_fflags_bits_uop_ppred_busy; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_0_bits_fflags_bits_uop_stale_pdst_0 = io_wakeups_0_bits_fflags_bits_uop_stale_pdst; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_exception_0 = io_wakeups_0_bits_fflags_bits_uop_exception; // @[rename-stage.scala:160:7] wire [63:0] io_wakeups_0_bits_fflags_bits_uop_exc_cause_0 = io_wakeups_0_bits_fflags_bits_uop_exc_cause; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_bypassable_0 = io_wakeups_0_bits_fflags_bits_uop_bypassable; // @[rename-stage.scala:160:7] wire [4:0] io_wakeups_0_bits_fflags_bits_uop_mem_cmd_0 = io_wakeups_0_bits_fflags_bits_uop_mem_cmd; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_0_bits_fflags_bits_uop_mem_size_0 = io_wakeups_0_bits_fflags_bits_uop_mem_size; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_mem_signed_0 = io_wakeups_0_bits_fflags_bits_uop_mem_signed; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_is_fence_0 = io_wakeups_0_bits_fflags_bits_uop_is_fence; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_is_fencei_0 = io_wakeups_0_bits_fflags_bits_uop_is_fencei; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_is_amo_0 = io_wakeups_0_bits_fflags_bits_uop_is_amo; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_uses_ldq_0 = io_wakeups_0_bits_fflags_bits_uop_uses_ldq; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_uses_stq_0 = io_wakeups_0_bits_fflags_bits_uop_uses_stq; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_is_sys_pc2epc_0 = io_wakeups_0_bits_fflags_bits_uop_is_sys_pc2epc; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_is_unique_0 = io_wakeups_0_bits_fflags_bits_uop_is_unique; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_flush_on_commit_0 = io_wakeups_0_bits_fflags_bits_uop_flush_on_commit; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_ldst_is_rs1_0 = io_wakeups_0_bits_fflags_bits_uop_ldst_is_rs1; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_0_bits_fflags_bits_uop_ldst_0 = io_wakeups_0_bits_fflags_bits_uop_ldst; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_0_bits_fflags_bits_uop_lrs1_0 = io_wakeups_0_bits_fflags_bits_uop_lrs1; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_0_bits_fflags_bits_uop_lrs2_0 = io_wakeups_0_bits_fflags_bits_uop_lrs2; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_0_bits_fflags_bits_uop_lrs3_0 = io_wakeups_0_bits_fflags_bits_uop_lrs3; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_ldst_val_0 = io_wakeups_0_bits_fflags_bits_uop_ldst_val; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_0_bits_fflags_bits_uop_dst_rtype_0 = io_wakeups_0_bits_fflags_bits_uop_dst_rtype; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_0_bits_fflags_bits_uop_lrs1_rtype_0 = io_wakeups_0_bits_fflags_bits_uop_lrs1_rtype; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_0_bits_fflags_bits_uop_lrs2_rtype_0 = 
io_wakeups_0_bits_fflags_bits_uop_lrs2_rtype; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_frs3_en_0 = io_wakeups_0_bits_fflags_bits_uop_frs3_en; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_fp_val_0 = io_wakeups_0_bits_fflags_bits_uop_fp_val; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_fp_single_0 = io_wakeups_0_bits_fflags_bits_uop_fp_single; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_xcpt_pf_if_0 = io_wakeups_0_bits_fflags_bits_uop_xcpt_pf_if; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_xcpt_ae_if_0 = io_wakeups_0_bits_fflags_bits_uop_xcpt_ae_if; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_xcpt_ma_if_0 = io_wakeups_0_bits_fflags_bits_uop_xcpt_ma_if; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_bp_debug_if_0 = io_wakeups_0_bits_fflags_bits_uop_bp_debug_if; // @[rename-stage.scala:160:7] wire io_wakeups_0_bits_fflags_bits_uop_bp_xcpt_if_0 = io_wakeups_0_bits_fflags_bits_uop_bp_xcpt_if; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_0_bits_fflags_bits_uop_debug_fsrc_0 = io_wakeups_0_bits_fflags_bits_uop_debug_fsrc; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_0_bits_fflags_bits_uop_debug_tsrc_0 = io_wakeups_0_bits_fflags_bits_uop_debug_tsrc; // @[rename-stage.scala:160:7] wire [4:0] io_wakeups_0_bits_fflags_bits_flags_0 = io_wakeups_0_bits_fflags_bits_flags; // @[rename-stage.scala:160:7] wire io_wakeups_1_valid_0 = io_wakeups_1_valid; // @[rename-stage.scala:160:7] wire [6:0] io_wakeups_1_bits_uop_uopc_0 = io_wakeups_1_bits_uop_uopc; // @[rename-stage.scala:160:7] wire [31:0] io_wakeups_1_bits_uop_inst_0 = io_wakeups_1_bits_uop_inst; // @[rename-stage.scala:160:7] wire [31:0] io_wakeups_1_bits_uop_debug_inst_0 = io_wakeups_1_bits_uop_debug_inst; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_is_rvc_0 = io_wakeups_1_bits_uop_is_rvc; // @[rename-stage.scala:160:7] wire [39:0] io_wakeups_1_bits_uop_debug_pc_0 = io_wakeups_1_bits_uop_debug_pc; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_1_bits_uop_iq_type_0 = io_wakeups_1_bits_uop_iq_type; // @[rename-stage.scala:160:7] wire [9:0] io_wakeups_1_bits_uop_fu_code_0 = io_wakeups_1_bits_uop_fu_code; // @[rename-stage.scala:160:7] wire [3:0] io_wakeups_1_bits_uop_ctrl_br_type_0 = io_wakeups_1_bits_uop_ctrl_br_type; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_1_bits_uop_ctrl_op1_sel_0 = io_wakeups_1_bits_uop_ctrl_op1_sel; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_1_bits_uop_ctrl_op2_sel_0 = io_wakeups_1_bits_uop_ctrl_op2_sel; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_1_bits_uop_ctrl_imm_sel_0 = io_wakeups_1_bits_uop_ctrl_imm_sel; // @[rename-stage.scala:160:7] wire [4:0] io_wakeups_1_bits_uop_ctrl_op_fcn_0 = io_wakeups_1_bits_uop_ctrl_op_fcn; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_ctrl_fcn_dw_0 = io_wakeups_1_bits_uop_ctrl_fcn_dw; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_1_bits_uop_ctrl_csr_cmd_0 = io_wakeups_1_bits_uop_ctrl_csr_cmd; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_ctrl_is_load_0 = io_wakeups_1_bits_uop_ctrl_is_load; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_ctrl_is_sta_0 = io_wakeups_1_bits_uop_ctrl_is_sta; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_ctrl_is_std_0 = io_wakeups_1_bits_uop_ctrl_is_std; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_1_bits_uop_iw_state_0 = io_wakeups_1_bits_uop_iw_state; // 
@[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_iw_p1_poisoned_0 = io_wakeups_1_bits_uop_iw_p1_poisoned; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_iw_p2_poisoned_0 = io_wakeups_1_bits_uop_iw_p2_poisoned; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_is_br_0 = io_wakeups_1_bits_uop_is_br; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_is_jalr_0 = io_wakeups_1_bits_uop_is_jalr; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_is_jal_0 = io_wakeups_1_bits_uop_is_jal; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_is_sfb_0 = io_wakeups_1_bits_uop_is_sfb; // @[rename-stage.scala:160:7] wire [7:0] io_wakeups_1_bits_uop_br_mask_0 = io_wakeups_1_bits_uop_br_mask; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_1_bits_uop_br_tag_0 = io_wakeups_1_bits_uop_br_tag; // @[rename-stage.scala:160:7] wire [3:0] io_wakeups_1_bits_uop_ftq_idx_0 = io_wakeups_1_bits_uop_ftq_idx; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_edge_inst_0 = io_wakeups_1_bits_uop_edge_inst; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_1_bits_uop_pc_lob_0 = io_wakeups_1_bits_uop_pc_lob; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_taken_0 = io_wakeups_1_bits_uop_taken; // @[rename-stage.scala:160:7] wire [19:0] io_wakeups_1_bits_uop_imm_packed_0 = io_wakeups_1_bits_uop_imm_packed; // @[rename-stage.scala:160:7] wire [11:0] io_wakeups_1_bits_uop_csr_addr_0 = io_wakeups_1_bits_uop_csr_addr; // @[rename-stage.scala:160:7] wire [4:0] io_wakeups_1_bits_uop_rob_idx_0 = io_wakeups_1_bits_uop_rob_idx; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_1_bits_uop_ldq_idx_0 = io_wakeups_1_bits_uop_ldq_idx; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_1_bits_uop_stq_idx_0 = io_wakeups_1_bits_uop_stq_idx; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_1_bits_uop_rxq_idx_0 = io_wakeups_1_bits_uop_rxq_idx; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_1_bits_uop_pdst_0 = io_wakeups_1_bits_uop_pdst; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_1_bits_uop_prs1_0 = io_wakeups_1_bits_uop_prs1; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_1_bits_uop_prs2_0 = io_wakeups_1_bits_uop_prs2; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_1_bits_uop_prs3_0 = io_wakeups_1_bits_uop_prs3; // @[rename-stage.scala:160:7] wire [3:0] io_wakeups_1_bits_uop_ppred_0 = io_wakeups_1_bits_uop_ppred; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_prs1_busy_0 = io_wakeups_1_bits_uop_prs1_busy; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_prs2_busy_0 = io_wakeups_1_bits_uop_prs2_busy; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_prs3_busy_0 = io_wakeups_1_bits_uop_prs3_busy; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_ppred_busy_0 = io_wakeups_1_bits_uop_ppred_busy; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_1_bits_uop_stale_pdst_0 = io_wakeups_1_bits_uop_stale_pdst; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_exception_0 = io_wakeups_1_bits_uop_exception; // @[rename-stage.scala:160:7] wire [63:0] io_wakeups_1_bits_uop_exc_cause_0 = io_wakeups_1_bits_uop_exc_cause; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_bypassable_0 = io_wakeups_1_bits_uop_bypassable; // @[rename-stage.scala:160:7] wire [4:0] io_wakeups_1_bits_uop_mem_cmd_0 = io_wakeups_1_bits_uop_mem_cmd; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_1_bits_uop_mem_size_0 = io_wakeups_1_bits_uop_mem_size; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_mem_signed_0 
= io_wakeups_1_bits_uop_mem_signed; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_is_fence_0 = io_wakeups_1_bits_uop_is_fence; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_is_fencei_0 = io_wakeups_1_bits_uop_is_fencei; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_is_amo_0 = io_wakeups_1_bits_uop_is_amo; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_uses_ldq_0 = io_wakeups_1_bits_uop_uses_ldq; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_uses_stq_0 = io_wakeups_1_bits_uop_uses_stq; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_is_sys_pc2epc_0 = io_wakeups_1_bits_uop_is_sys_pc2epc; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_is_unique_0 = io_wakeups_1_bits_uop_is_unique; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_flush_on_commit_0 = io_wakeups_1_bits_uop_flush_on_commit; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_ldst_is_rs1_0 = io_wakeups_1_bits_uop_ldst_is_rs1; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_1_bits_uop_ldst_0 = io_wakeups_1_bits_uop_ldst; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_1_bits_uop_lrs1_0 = io_wakeups_1_bits_uop_lrs1; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_1_bits_uop_lrs2_0 = io_wakeups_1_bits_uop_lrs2; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_1_bits_uop_lrs3_0 = io_wakeups_1_bits_uop_lrs3; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_ldst_val_0 = io_wakeups_1_bits_uop_ldst_val; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_1_bits_uop_dst_rtype_0 = io_wakeups_1_bits_uop_dst_rtype; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_1_bits_uop_lrs1_rtype_0 = io_wakeups_1_bits_uop_lrs1_rtype; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_1_bits_uop_lrs2_rtype_0 = io_wakeups_1_bits_uop_lrs2_rtype; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_frs3_en_0 = io_wakeups_1_bits_uop_frs3_en; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_fp_val_0 = io_wakeups_1_bits_uop_fp_val; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_fp_single_0 = io_wakeups_1_bits_uop_fp_single; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_xcpt_pf_if_0 = io_wakeups_1_bits_uop_xcpt_pf_if; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_xcpt_ae_if_0 = io_wakeups_1_bits_uop_xcpt_ae_if; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_xcpt_ma_if_0 = io_wakeups_1_bits_uop_xcpt_ma_if; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_bp_debug_if_0 = io_wakeups_1_bits_uop_bp_debug_if; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_uop_bp_xcpt_if_0 = io_wakeups_1_bits_uop_bp_xcpt_if; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_1_bits_uop_debug_fsrc_0 = io_wakeups_1_bits_uop_debug_fsrc; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_1_bits_uop_debug_tsrc_0 = io_wakeups_1_bits_uop_debug_tsrc; // @[rename-stage.scala:160:7] wire io_wakeups_2_valid_0 = io_wakeups_2_valid; // @[rename-stage.scala:160:7] wire [6:0] io_wakeups_2_bits_uop_uopc_0 = io_wakeups_2_bits_uop_uopc; // @[rename-stage.scala:160:7] wire [31:0] io_wakeups_2_bits_uop_inst_0 = io_wakeups_2_bits_uop_inst; // @[rename-stage.scala:160:7] wire [31:0] io_wakeups_2_bits_uop_debug_inst_0 = io_wakeups_2_bits_uop_debug_inst; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_is_rvc_0 = io_wakeups_2_bits_uop_is_rvc; // @[rename-stage.scala:160:7] wire [39:0] io_wakeups_2_bits_uop_debug_pc_0 = io_wakeups_2_bits_uop_debug_pc; // @[rename-stage.scala:160:7] wire [2:0] 
io_wakeups_2_bits_uop_iq_type_0 = io_wakeups_2_bits_uop_iq_type; // @[rename-stage.scala:160:7] wire [9:0] io_wakeups_2_bits_uop_fu_code_0 = io_wakeups_2_bits_uop_fu_code; // @[rename-stage.scala:160:7] wire [3:0] io_wakeups_2_bits_uop_ctrl_br_type_0 = io_wakeups_2_bits_uop_ctrl_br_type; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_2_bits_uop_ctrl_op1_sel_0 = io_wakeups_2_bits_uop_ctrl_op1_sel; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_2_bits_uop_ctrl_op2_sel_0 = io_wakeups_2_bits_uop_ctrl_op2_sel; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_2_bits_uop_ctrl_imm_sel_0 = io_wakeups_2_bits_uop_ctrl_imm_sel; // @[rename-stage.scala:160:7] wire [4:0] io_wakeups_2_bits_uop_ctrl_op_fcn_0 = io_wakeups_2_bits_uop_ctrl_op_fcn; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_ctrl_fcn_dw_0 = io_wakeups_2_bits_uop_ctrl_fcn_dw; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_2_bits_uop_ctrl_csr_cmd_0 = io_wakeups_2_bits_uop_ctrl_csr_cmd; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_ctrl_is_load_0 = io_wakeups_2_bits_uop_ctrl_is_load; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_ctrl_is_sta_0 = io_wakeups_2_bits_uop_ctrl_is_sta; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_ctrl_is_std_0 = io_wakeups_2_bits_uop_ctrl_is_std; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_2_bits_uop_iw_state_0 = io_wakeups_2_bits_uop_iw_state; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_iw_p1_poisoned_0 = io_wakeups_2_bits_uop_iw_p1_poisoned; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_iw_p2_poisoned_0 = io_wakeups_2_bits_uop_iw_p2_poisoned; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_is_br_0 = io_wakeups_2_bits_uop_is_br; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_is_jalr_0 = io_wakeups_2_bits_uop_is_jalr; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_is_jal_0 = io_wakeups_2_bits_uop_is_jal; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_is_sfb_0 = io_wakeups_2_bits_uop_is_sfb; // @[rename-stage.scala:160:7] wire [7:0] io_wakeups_2_bits_uop_br_mask_0 = io_wakeups_2_bits_uop_br_mask; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_2_bits_uop_br_tag_0 = io_wakeups_2_bits_uop_br_tag; // @[rename-stage.scala:160:7] wire [3:0] io_wakeups_2_bits_uop_ftq_idx_0 = io_wakeups_2_bits_uop_ftq_idx; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_edge_inst_0 = io_wakeups_2_bits_uop_edge_inst; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_2_bits_uop_pc_lob_0 = io_wakeups_2_bits_uop_pc_lob; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_taken_0 = io_wakeups_2_bits_uop_taken; // @[rename-stage.scala:160:7] wire [19:0] io_wakeups_2_bits_uop_imm_packed_0 = io_wakeups_2_bits_uop_imm_packed; // @[rename-stage.scala:160:7] wire [11:0] io_wakeups_2_bits_uop_csr_addr_0 = io_wakeups_2_bits_uop_csr_addr; // @[rename-stage.scala:160:7] wire [4:0] io_wakeups_2_bits_uop_rob_idx_0 = io_wakeups_2_bits_uop_rob_idx; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_2_bits_uop_ldq_idx_0 = io_wakeups_2_bits_uop_ldq_idx; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_2_bits_uop_stq_idx_0 = io_wakeups_2_bits_uop_stq_idx; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_2_bits_uop_rxq_idx_0 = io_wakeups_2_bits_uop_rxq_idx; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_2_bits_uop_pdst_0 = io_wakeups_2_bits_uop_pdst; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_2_bits_uop_prs1_0 = io_wakeups_2_bits_uop_prs1; // 
@[rename-stage.scala:160:7] wire [5:0] io_wakeups_2_bits_uop_prs2_0 = io_wakeups_2_bits_uop_prs2; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_2_bits_uop_prs3_0 = io_wakeups_2_bits_uop_prs3; // @[rename-stage.scala:160:7] wire [3:0] io_wakeups_2_bits_uop_ppred_0 = io_wakeups_2_bits_uop_ppred; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_prs1_busy_0 = io_wakeups_2_bits_uop_prs1_busy; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_prs2_busy_0 = io_wakeups_2_bits_uop_prs2_busy; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_prs3_busy_0 = io_wakeups_2_bits_uop_prs3_busy; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_ppred_busy_0 = io_wakeups_2_bits_uop_ppred_busy; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_2_bits_uop_stale_pdst_0 = io_wakeups_2_bits_uop_stale_pdst; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_exception_0 = io_wakeups_2_bits_uop_exception; // @[rename-stage.scala:160:7] wire [63:0] io_wakeups_2_bits_uop_exc_cause_0 = io_wakeups_2_bits_uop_exc_cause; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_bypassable_0 = io_wakeups_2_bits_uop_bypassable; // @[rename-stage.scala:160:7] wire [4:0] io_wakeups_2_bits_uop_mem_cmd_0 = io_wakeups_2_bits_uop_mem_cmd; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_2_bits_uop_mem_size_0 = io_wakeups_2_bits_uop_mem_size; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_mem_signed_0 = io_wakeups_2_bits_uop_mem_signed; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_is_fence_0 = io_wakeups_2_bits_uop_is_fence; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_is_fencei_0 = io_wakeups_2_bits_uop_is_fencei; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_is_amo_0 = io_wakeups_2_bits_uop_is_amo; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_uses_ldq_0 = io_wakeups_2_bits_uop_uses_ldq; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_uses_stq_0 = io_wakeups_2_bits_uop_uses_stq; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_is_sys_pc2epc_0 = io_wakeups_2_bits_uop_is_sys_pc2epc; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_is_unique_0 = io_wakeups_2_bits_uop_is_unique; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_flush_on_commit_0 = io_wakeups_2_bits_uop_flush_on_commit; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_ldst_is_rs1_0 = io_wakeups_2_bits_uop_ldst_is_rs1; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_2_bits_uop_ldst_0 = io_wakeups_2_bits_uop_ldst; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_2_bits_uop_lrs1_0 = io_wakeups_2_bits_uop_lrs1; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_2_bits_uop_lrs2_0 = io_wakeups_2_bits_uop_lrs2; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_2_bits_uop_lrs3_0 = io_wakeups_2_bits_uop_lrs3; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_ldst_val_0 = io_wakeups_2_bits_uop_ldst_val; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_2_bits_uop_dst_rtype_0 = io_wakeups_2_bits_uop_dst_rtype; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_2_bits_uop_lrs1_rtype_0 = io_wakeups_2_bits_uop_lrs1_rtype; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_2_bits_uop_lrs2_rtype_0 = io_wakeups_2_bits_uop_lrs2_rtype; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_frs3_en_0 = io_wakeups_2_bits_uop_frs3_en; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_fp_val_0 = io_wakeups_2_bits_uop_fp_val; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_fp_single_0 
= io_wakeups_2_bits_uop_fp_single; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_xcpt_pf_if_0 = io_wakeups_2_bits_uop_xcpt_pf_if; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_xcpt_ae_if_0 = io_wakeups_2_bits_uop_xcpt_ae_if; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_xcpt_ma_if_0 = io_wakeups_2_bits_uop_xcpt_ma_if; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_bp_debug_if_0 = io_wakeups_2_bits_uop_bp_debug_if; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_uop_bp_xcpt_if_0 = io_wakeups_2_bits_uop_bp_xcpt_if; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_2_bits_uop_debug_fsrc_0 = io_wakeups_2_bits_uop_debug_fsrc; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_2_bits_uop_debug_tsrc_0 = io_wakeups_2_bits_uop_debug_tsrc; // @[rename-stage.scala:160:7] wire io_com_valids_0_0 = io_com_valids_0; // @[rename-stage.scala:160:7] wire [6:0] io_com_uops_0_uopc_0 = io_com_uops_0_uopc; // @[rename-stage.scala:160:7] wire [31:0] io_com_uops_0_inst_0 = io_com_uops_0_inst; // @[rename-stage.scala:160:7] wire [31:0] io_com_uops_0_debug_inst_0 = io_com_uops_0_debug_inst; // @[rename-stage.scala:160:7] wire io_com_uops_0_is_rvc_0 = io_com_uops_0_is_rvc; // @[rename-stage.scala:160:7] wire [39:0] io_com_uops_0_debug_pc_0 = io_com_uops_0_debug_pc; // @[rename-stage.scala:160:7] wire [2:0] io_com_uops_0_iq_type_0 = io_com_uops_0_iq_type; // @[rename-stage.scala:160:7] wire [9:0] io_com_uops_0_fu_code_0 = io_com_uops_0_fu_code; // @[rename-stage.scala:160:7] wire [3:0] io_com_uops_0_ctrl_br_type_0 = io_com_uops_0_ctrl_br_type; // @[rename-stage.scala:160:7] wire [1:0] io_com_uops_0_ctrl_op1_sel_0 = io_com_uops_0_ctrl_op1_sel; // @[rename-stage.scala:160:7] wire [2:0] io_com_uops_0_ctrl_op2_sel_0 = io_com_uops_0_ctrl_op2_sel; // @[rename-stage.scala:160:7] wire [2:0] io_com_uops_0_ctrl_imm_sel_0 = io_com_uops_0_ctrl_imm_sel; // @[rename-stage.scala:160:7] wire [4:0] io_com_uops_0_ctrl_op_fcn_0 = io_com_uops_0_ctrl_op_fcn; // @[rename-stage.scala:160:7] wire io_com_uops_0_ctrl_fcn_dw_0 = io_com_uops_0_ctrl_fcn_dw; // @[rename-stage.scala:160:7] wire [2:0] io_com_uops_0_ctrl_csr_cmd_0 = io_com_uops_0_ctrl_csr_cmd; // @[rename-stage.scala:160:7] wire io_com_uops_0_ctrl_is_load_0 = io_com_uops_0_ctrl_is_load; // @[rename-stage.scala:160:7] wire io_com_uops_0_ctrl_is_sta_0 = io_com_uops_0_ctrl_is_sta; // @[rename-stage.scala:160:7] wire io_com_uops_0_ctrl_is_std_0 = io_com_uops_0_ctrl_is_std; // @[rename-stage.scala:160:7] wire [1:0] io_com_uops_0_iw_state_0 = io_com_uops_0_iw_state; // @[rename-stage.scala:160:7] wire io_com_uops_0_iw_p1_poisoned_0 = io_com_uops_0_iw_p1_poisoned; // @[rename-stage.scala:160:7] wire io_com_uops_0_iw_p2_poisoned_0 = io_com_uops_0_iw_p2_poisoned; // @[rename-stage.scala:160:7] wire io_com_uops_0_is_br_0 = io_com_uops_0_is_br; // @[rename-stage.scala:160:7] wire io_com_uops_0_is_jalr_0 = io_com_uops_0_is_jalr; // @[rename-stage.scala:160:7] wire io_com_uops_0_is_jal_0 = io_com_uops_0_is_jal; // @[rename-stage.scala:160:7] wire io_com_uops_0_is_sfb_0 = io_com_uops_0_is_sfb; // @[rename-stage.scala:160:7] wire [7:0] io_com_uops_0_br_mask_0 = io_com_uops_0_br_mask; // @[rename-stage.scala:160:7] wire [2:0] io_com_uops_0_br_tag_0 = io_com_uops_0_br_tag; // @[rename-stage.scala:160:7] wire [3:0] io_com_uops_0_ftq_idx_0 = io_com_uops_0_ftq_idx; // @[rename-stage.scala:160:7] wire io_com_uops_0_edge_inst_0 = io_com_uops_0_edge_inst; // @[rename-stage.scala:160:7] wire [5:0] io_com_uops_0_pc_lob_0 = io_com_uops_0_pc_lob; // 
@[rename-stage.scala:160:7] wire io_com_uops_0_taken_0 = io_com_uops_0_taken; // @[rename-stage.scala:160:7] wire [19:0] io_com_uops_0_imm_packed_0 = io_com_uops_0_imm_packed; // @[rename-stage.scala:160:7] wire [11:0] io_com_uops_0_csr_addr_0 = io_com_uops_0_csr_addr; // @[rename-stage.scala:160:7] wire [4:0] io_com_uops_0_rob_idx_0 = io_com_uops_0_rob_idx; // @[rename-stage.scala:160:7] wire [2:0] io_com_uops_0_ldq_idx_0 = io_com_uops_0_ldq_idx; // @[rename-stage.scala:160:7] wire [2:0] io_com_uops_0_stq_idx_0 = io_com_uops_0_stq_idx; // @[rename-stage.scala:160:7] wire [1:0] io_com_uops_0_rxq_idx_0 = io_com_uops_0_rxq_idx; // @[rename-stage.scala:160:7] wire [5:0] io_com_uops_0_pdst_0 = io_com_uops_0_pdst; // @[rename-stage.scala:160:7] wire [5:0] io_com_uops_0_prs1_0 = io_com_uops_0_prs1; // @[rename-stage.scala:160:7] wire [5:0] io_com_uops_0_prs2_0 = io_com_uops_0_prs2; // @[rename-stage.scala:160:7] wire [5:0] io_com_uops_0_prs3_0 = io_com_uops_0_prs3; // @[rename-stage.scala:160:7] wire [3:0] io_com_uops_0_ppred_0 = io_com_uops_0_ppred; // @[rename-stage.scala:160:7] wire io_com_uops_0_prs1_busy_0 = io_com_uops_0_prs1_busy; // @[rename-stage.scala:160:7] wire io_com_uops_0_prs2_busy_0 = io_com_uops_0_prs2_busy; // @[rename-stage.scala:160:7] wire io_com_uops_0_prs3_busy_0 = io_com_uops_0_prs3_busy; // @[rename-stage.scala:160:7] wire io_com_uops_0_ppred_busy_0 = io_com_uops_0_ppred_busy; // @[rename-stage.scala:160:7] wire [5:0] io_com_uops_0_stale_pdst_0 = io_com_uops_0_stale_pdst; // @[rename-stage.scala:160:7] wire io_com_uops_0_exception_0 = io_com_uops_0_exception; // @[rename-stage.scala:160:7] wire [63:0] io_com_uops_0_exc_cause_0 = io_com_uops_0_exc_cause; // @[rename-stage.scala:160:7] wire io_com_uops_0_bypassable_0 = io_com_uops_0_bypassable; // @[rename-stage.scala:160:7] wire [4:0] io_com_uops_0_mem_cmd_0 = io_com_uops_0_mem_cmd; // @[rename-stage.scala:160:7] wire [1:0] io_com_uops_0_mem_size_0 = io_com_uops_0_mem_size; // @[rename-stage.scala:160:7] wire io_com_uops_0_mem_signed_0 = io_com_uops_0_mem_signed; // @[rename-stage.scala:160:7] wire io_com_uops_0_is_fence_0 = io_com_uops_0_is_fence; // @[rename-stage.scala:160:7] wire io_com_uops_0_is_fencei_0 = io_com_uops_0_is_fencei; // @[rename-stage.scala:160:7] wire io_com_uops_0_is_amo_0 = io_com_uops_0_is_amo; // @[rename-stage.scala:160:7] wire io_com_uops_0_uses_ldq_0 = io_com_uops_0_uses_ldq; // @[rename-stage.scala:160:7] wire io_com_uops_0_uses_stq_0 = io_com_uops_0_uses_stq; // @[rename-stage.scala:160:7] wire io_com_uops_0_is_sys_pc2epc_0 = io_com_uops_0_is_sys_pc2epc; // @[rename-stage.scala:160:7] wire io_com_uops_0_is_unique_0 = io_com_uops_0_is_unique; // @[rename-stage.scala:160:7] wire io_com_uops_0_flush_on_commit_0 = io_com_uops_0_flush_on_commit; // @[rename-stage.scala:160:7] wire io_com_uops_0_ldst_is_rs1_0 = io_com_uops_0_ldst_is_rs1; // @[rename-stage.scala:160:7] wire [5:0] io_com_uops_0_ldst_0 = io_com_uops_0_ldst; // @[rename-stage.scala:160:7] wire [5:0] io_com_uops_0_lrs1_0 = io_com_uops_0_lrs1; // @[rename-stage.scala:160:7] wire [5:0] io_com_uops_0_lrs2_0 = io_com_uops_0_lrs2; // @[rename-stage.scala:160:7] wire [5:0] io_com_uops_0_lrs3_0 = io_com_uops_0_lrs3; // @[rename-stage.scala:160:7] wire io_com_uops_0_ldst_val_0 = io_com_uops_0_ldst_val; // @[rename-stage.scala:160:7] wire [1:0] io_com_uops_0_dst_rtype_0 = io_com_uops_0_dst_rtype; // @[rename-stage.scala:160:7] wire [1:0] io_com_uops_0_lrs1_rtype_0 = io_com_uops_0_lrs1_rtype; // @[rename-stage.scala:160:7] wire [1:0] 
io_com_uops_0_lrs2_rtype_0 = io_com_uops_0_lrs2_rtype; // @[rename-stage.scala:160:7] wire io_com_uops_0_frs3_en_0 = io_com_uops_0_frs3_en; // @[rename-stage.scala:160:7] wire io_com_uops_0_fp_val_0 = io_com_uops_0_fp_val; // @[rename-stage.scala:160:7] wire io_com_uops_0_fp_single_0 = io_com_uops_0_fp_single; // @[rename-stage.scala:160:7] wire io_com_uops_0_xcpt_pf_if_0 = io_com_uops_0_xcpt_pf_if; // @[rename-stage.scala:160:7] wire io_com_uops_0_xcpt_ae_if_0 = io_com_uops_0_xcpt_ae_if; // @[rename-stage.scala:160:7] wire io_com_uops_0_xcpt_ma_if_0 = io_com_uops_0_xcpt_ma_if; // @[rename-stage.scala:160:7] wire io_com_uops_0_bp_debug_if_0 = io_com_uops_0_bp_debug_if; // @[rename-stage.scala:160:7] wire io_com_uops_0_bp_xcpt_if_0 = io_com_uops_0_bp_xcpt_if; // @[rename-stage.scala:160:7] wire [1:0] io_com_uops_0_debug_fsrc_0 = io_com_uops_0_debug_fsrc; // @[rename-stage.scala:160:7] wire [1:0] io_com_uops_0_debug_tsrc_0 = io_com_uops_0_debug_tsrc; // @[rename-stage.scala:160:7] wire io_rbk_valids_0_0 = io_rbk_valids_0; // @[rename-stage.scala:160:7] wire io_rollback_0 = io_rollback; // @[rename-stage.scala:160:7] wire io_debug_rob_empty_0 = io_debug_rob_empty; // @[rename-stage.scala:160:7] wire [3:0] io_dec_uops_0_ctrl_br_type = 4'h0; // @[rename-stage.scala:160:7] wire [3:0] io_dec_uops_0_ppred = 4'h0; // @[rename-stage.scala:160:7] wire [3:0] io_wakeups_1_bits_fflags_bits_uop_ctrl_br_type = 4'h0; // @[rename-stage.scala:160:7] wire [3:0] io_wakeups_1_bits_fflags_bits_uop_ftq_idx = 4'h0; // @[rename-stage.scala:160:7] wire [3:0] io_wakeups_1_bits_fflags_bits_uop_ppred = 4'h0; // @[rename-stage.scala:160:7] wire [3:0] io_wakeups_2_bits_fflags_bits_uop_ctrl_br_type = 4'h0; // @[rename-stage.scala:160:7] wire [3:0] io_wakeups_2_bits_fflags_bits_uop_ftq_idx = 4'h0; // @[rename-stage.scala:160:7] wire [3:0] io_wakeups_2_bits_fflags_bits_uop_ppred = 4'h0; // @[rename-stage.scala:160:7] wire [3:0] ren1_uops_0_ctrl_br_type = 4'h0; // @[rename-stage.scala:101:29] wire [3:0] ren1_uops_0_ppred = 4'h0; // @[rename-stage.scala:101:29] wire [1:0] io_dec_uops_0_ctrl_op1_sel = 2'h0; // @[rename-stage.scala:160:7] wire [1:0] io_dec_uops_0_iw_state = 2'h0; // @[rename-stage.scala:160:7] wire [1:0] io_dec_uops_0_rxq_idx = 2'h0; // @[rename-stage.scala:160:7] wire [1:0] io_dec_uops_0_debug_tsrc = 2'h0; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_1_bits_fflags_bits_uop_ctrl_op1_sel = 2'h0; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_1_bits_fflags_bits_uop_iw_state = 2'h0; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_1_bits_fflags_bits_uop_rxq_idx = 2'h0; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_1_bits_fflags_bits_uop_mem_size = 2'h0; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_1_bits_fflags_bits_uop_dst_rtype = 2'h0; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_1_bits_fflags_bits_uop_lrs1_rtype = 2'h0; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_1_bits_fflags_bits_uop_lrs2_rtype = 2'h0; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_1_bits_fflags_bits_uop_debug_fsrc = 2'h0; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_1_bits_fflags_bits_uop_debug_tsrc = 2'h0; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_2_bits_fflags_bits_uop_ctrl_op1_sel = 2'h0; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_2_bits_fflags_bits_uop_iw_state = 2'h0; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_2_bits_fflags_bits_uop_rxq_idx = 2'h0; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_2_bits_fflags_bits_uop_mem_size = 2'h0; // 
@[rename-stage.scala:160:7] wire [1:0] io_wakeups_2_bits_fflags_bits_uop_dst_rtype = 2'h0; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_2_bits_fflags_bits_uop_lrs1_rtype = 2'h0; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_2_bits_fflags_bits_uop_lrs2_rtype = 2'h0; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_2_bits_fflags_bits_uop_debug_fsrc = 2'h0; // @[rename-stage.scala:160:7] wire [1:0] io_wakeups_2_bits_fflags_bits_uop_debug_tsrc = 2'h0; // @[rename-stage.scala:160:7] wire [1:0] ren1_uops_0_ctrl_op1_sel = 2'h0; // @[rename-stage.scala:101:29] wire [1:0] ren1_uops_0_iw_state = 2'h0; // @[rename-stage.scala:101:29] wire [1:0] ren1_uops_0_rxq_idx = 2'h0; // @[rename-stage.scala:101:29] wire [1:0] ren1_uops_0_debug_tsrc = 2'h0; // @[rename-stage.scala:101:29] wire [2:0] io_dec_uops_0_ctrl_op2_sel = 3'h0; // @[rename-stage.scala:160:7] wire [2:0] io_dec_uops_0_ctrl_imm_sel = 3'h0; // @[rename-stage.scala:160:7] wire [2:0] io_dec_uops_0_ctrl_csr_cmd = 3'h0; // @[rename-stage.scala:160:7] wire [2:0] io_dec_uops_0_ldq_idx = 3'h0; // @[rename-stage.scala:160:7] wire [2:0] io_dec_uops_0_stq_idx = 3'h0; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_1_bits_fflags_bits_uop_iq_type = 3'h0; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_1_bits_fflags_bits_uop_ctrl_op2_sel = 3'h0; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_1_bits_fflags_bits_uop_ctrl_imm_sel = 3'h0; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_1_bits_fflags_bits_uop_ctrl_csr_cmd = 3'h0; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_1_bits_fflags_bits_uop_br_tag = 3'h0; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_1_bits_fflags_bits_uop_ldq_idx = 3'h0; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_1_bits_fflags_bits_uop_stq_idx = 3'h0; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_2_bits_fflags_bits_uop_iq_type = 3'h0; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_2_bits_fflags_bits_uop_ctrl_op2_sel = 3'h0; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_2_bits_fflags_bits_uop_ctrl_imm_sel = 3'h0; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_2_bits_fflags_bits_uop_ctrl_csr_cmd = 3'h0; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_2_bits_fflags_bits_uop_br_tag = 3'h0; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_2_bits_fflags_bits_uop_ldq_idx = 3'h0; // @[rename-stage.scala:160:7] wire [2:0] io_wakeups_2_bits_fflags_bits_uop_stq_idx = 3'h0; // @[rename-stage.scala:160:7] wire [2:0] ren1_uops_0_ctrl_op2_sel = 3'h0; // @[rename-stage.scala:101:29] wire [2:0] ren1_uops_0_ctrl_imm_sel = 3'h0; // @[rename-stage.scala:101:29] wire [2:0] ren1_uops_0_ctrl_csr_cmd = 3'h0; // @[rename-stage.scala:101:29] wire [2:0] ren1_uops_0_ldq_idx = 3'h0; // @[rename-stage.scala:101:29] wire [2:0] ren1_uops_0_stq_idx = 3'h0; // @[rename-stage.scala:101:29] wire [4:0] io_dec_uops_0_ctrl_op_fcn = 5'h0; // @[rename-stage.scala:160:7] wire [4:0] io_dec_uops_0_rob_idx = 5'h0; // @[rename-stage.scala:160:7] wire [4:0] io_wakeups_1_bits_fflags_bits_uop_ctrl_op_fcn = 5'h0; // @[rename-stage.scala:160:7] wire [4:0] io_wakeups_1_bits_fflags_bits_uop_rob_idx = 5'h0; // @[rename-stage.scala:160:7] wire [4:0] io_wakeups_1_bits_fflags_bits_uop_mem_cmd = 5'h0; // @[rename-stage.scala:160:7] wire [4:0] io_wakeups_1_bits_fflags_bits_flags = 5'h0; // @[rename-stage.scala:160:7] wire [4:0] io_wakeups_2_bits_fflags_bits_uop_ctrl_op_fcn = 5'h0; // @[rename-stage.scala:160:7] wire [4:0] io_wakeups_2_bits_fflags_bits_uop_rob_idx = 5'h0; // 
@[rename-stage.scala:160:7] wire [4:0] io_wakeups_2_bits_fflags_bits_uop_mem_cmd = 5'h0; // @[rename-stage.scala:160:7] wire [4:0] io_wakeups_2_bits_fflags_bits_flags = 5'h0; // @[rename-stage.scala:160:7] wire [4:0] ren1_uops_0_ctrl_op_fcn = 5'h0; // @[rename-stage.scala:101:29] wire [4:0] ren1_uops_0_rob_idx = 5'h0; // @[rename-stage.scala:101:29] wire io_dec_uops_0_ctrl_fcn_dw = 1'h0; // @[rename-stage.scala:160:7] wire io_dec_uops_0_ctrl_is_load = 1'h0; // @[rename-stage.scala:160:7] wire io_dec_uops_0_ctrl_is_sta = 1'h0; // @[rename-stage.scala:160:7] wire io_dec_uops_0_ctrl_is_std = 1'h0; // @[rename-stage.scala:160:7] wire io_dec_uops_0_iw_p1_poisoned = 1'h0; // @[rename-stage.scala:160:7] wire io_dec_uops_0_iw_p2_poisoned = 1'h0; // @[rename-stage.scala:160:7] wire io_dec_uops_0_prs1_busy = 1'h0; // @[rename-stage.scala:160:7] wire io_dec_uops_0_prs2_busy = 1'h0; // @[rename-stage.scala:160:7] wire io_dec_uops_0_prs3_busy = 1'h0; // @[rename-stage.scala:160:7] wire io_dec_uops_0_ppred_busy = 1'h0; // @[rename-stage.scala:160:7] wire io_dec_uops_0_ldst_is_rs1 = 1'h0; // @[rename-stage.scala:160:7] wire io_dec_uops_0_xcpt_ma_if = 1'h0; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_prs3_busy = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_predicated = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_valid = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_is_rvc = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_ctrl_fcn_dw = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_ctrl_is_load = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_ctrl_is_sta = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_ctrl_is_std = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_iw_p1_poisoned = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_iw_p2_poisoned = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_is_br = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_is_jalr = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_is_jal = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_is_sfb = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_edge_inst = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_taken = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_prs1_busy = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_prs2_busy = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_prs3_busy = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_ppred_busy = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_exception = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_bypassable = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_mem_signed = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_is_fence = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_is_fencei = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_is_amo = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_uses_ldq = 1'h0; // @[rename-stage.scala:160:7] wire 
io_wakeups_1_bits_fflags_bits_uop_uses_stq = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_is_sys_pc2epc = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_is_unique = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_flush_on_commit = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_ldst_is_rs1 = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_ldst_val = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_frs3_en = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_fp_val = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_fp_single = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_xcpt_pf_if = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_xcpt_ae_if = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_xcpt_ma_if = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_bp_debug_if = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_1_bits_fflags_bits_uop_bp_xcpt_if = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_predicated = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_valid = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_is_rvc = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_ctrl_fcn_dw = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_ctrl_is_load = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_ctrl_is_sta = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_ctrl_is_std = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_iw_p1_poisoned = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_iw_p2_poisoned = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_is_br = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_is_jalr = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_is_jal = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_is_sfb = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_edge_inst = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_taken = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_prs1_busy = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_prs2_busy = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_prs3_busy = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_ppred_busy = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_exception = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_bypassable = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_mem_signed = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_is_fence = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_is_fencei = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_is_amo = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_uses_ldq = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_uses_stq = 1'h0; // 
@[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_is_sys_pc2epc = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_is_unique = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_flush_on_commit = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_ldst_is_rs1 = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_ldst_val = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_frs3_en = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_fp_val = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_fp_single = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_xcpt_pf_if = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_xcpt_ae_if = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_xcpt_ma_if = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_bp_debug_if = 1'h0; // @[rename-stage.scala:160:7] wire io_wakeups_2_bits_fflags_bits_uop_bp_xcpt_if = 1'h0; // @[rename-stage.scala:160:7] wire ren1_uops_0_ctrl_fcn_dw = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_0_ctrl_is_load = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_0_ctrl_is_sta = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_0_ctrl_is_std = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_0_iw_p1_poisoned = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_0_iw_p2_poisoned = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_0_prs1_busy = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_0_prs2_busy = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_0_prs3_busy = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_0_ppred_busy = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_0_ldst_is_rs1 = 1'h0; // @[rename-stage.scala:101:29] wire ren1_uops_0_xcpt_ma_if = 1'h0; // @[rename-stage.scala:101:29] wire ren2_uops_0_prs3_busy = 1'h0; // @[rename-stage.scala:108:29] wire next_uop_prs3_busy = 1'h0; // @[rename-stage.scala:123:24] wire r_uop_bypassed_uop_prs3_busy = 1'h0; // @[rename-stage.scala:174:28] wire r_uop_newuop_prs3_busy = 1'h0; // @[util.scala:73:26] wire _ren2_uops_0_prs3_busy_T = 1'h0; // @[rename-stage.scala:325:34] wire bypassed_uop_prs3_busy = 1'h0; // @[rename-stage.scala:341:28] wire io_ren2_uops_0_newuop_prs3_busy = 1'h0; // @[util.scala:73:26] wire [11:0] io_dec_uops_0_csr_addr = 12'h0; // @[rename-stage.scala:160:7] wire [11:0] io_wakeups_1_bits_fflags_bits_uop_csr_addr = 12'h0; // @[rename-stage.scala:160:7] wire [11:0] io_wakeups_2_bits_fflags_bits_uop_csr_addr = 12'h0; // @[rename-stage.scala:160:7] wire [11:0] ren1_uops_0_csr_addr = 12'h0; // @[rename-stage.scala:101:29] wire [5:0] io_dec_uops_0_pdst = 6'h0; // @[rename-stage.scala:160:7] wire [5:0] io_dec_uops_0_prs1 = 6'h0; // @[rename-stage.scala:160:7] wire [5:0] io_dec_uops_0_prs2 = 6'h0; // @[rename-stage.scala:160:7] wire [5:0] io_dec_uops_0_prs3 = 6'h0; // @[rename-stage.scala:160:7] wire [5:0] io_dec_uops_0_stale_pdst = 6'h0; // @[rename-stage.scala:160:7] wire [5:0] io_ren2_uops_0_prs3 = 6'h0; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_1_bits_fflags_bits_uop_pc_lob = 6'h0; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_1_bits_fflags_bits_uop_pdst = 6'h0; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_1_bits_fflags_bits_uop_prs1 = 6'h0; // @[rename-stage.scala:160:7] wire [5:0] 
io_wakeups_1_bits_fflags_bits_uop_prs2 = 6'h0; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_1_bits_fflags_bits_uop_prs3 = 6'h0; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_1_bits_fflags_bits_uop_stale_pdst = 6'h0; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_1_bits_fflags_bits_uop_ldst = 6'h0; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_1_bits_fflags_bits_uop_lrs1 = 6'h0; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_1_bits_fflags_bits_uop_lrs2 = 6'h0; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_1_bits_fflags_bits_uop_lrs3 = 6'h0; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_2_bits_fflags_bits_uop_pc_lob = 6'h0; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_2_bits_fflags_bits_uop_pdst = 6'h0; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_2_bits_fflags_bits_uop_prs1 = 6'h0; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_2_bits_fflags_bits_uop_prs2 = 6'h0; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_2_bits_fflags_bits_uop_prs3 = 6'h0; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_2_bits_fflags_bits_uop_stale_pdst = 6'h0; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_2_bits_fflags_bits_uop_ldst = 6'h0; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_2_bits_fflags_bits_uop_lrs1 = 6'h0; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_2_bits_fflags_bits_uop_lrs2 = 6'h0; // @[rename-stage.scala:160:7] wire [5:0] io_wakeups_2_bits_fflags_bits_uop_lrs3 = 6'h0; // @[rename-stage.scala:160:7] wire [5:0] ren1_uops_0_pdst = 6'h0; // @[rename-stage.scala:101:29] wire [5:0] ren1_uops_0_prs3 = 6'h0; // @[rename-stage.scala:101:29] wire [5:0] ren2_uops_0_prs3 = 6'h0; // @[rename-stage.scala:108:29] wire [5:0] next_uop_prs3 = 6'h0; // @[rename-stage.scala:123:24] wire [5:0] r_uop_bypassed_uop_prs3 = 6'h0; // @[rename-stage.scala:174:28] wire [5:0] r_uop_newuop_prs3 = 6'h0; // @[util.scala:73:26] wire [5:0] bypassed_uop_prs3 = 6'h0; // @[rename-stage.scala:341:28] wire [5:0] io_ren2_uops_0_newuop_prs3 = 6'h0; // @[util.scala:73:26] wire [63:0] io_wakeups_1_bits_data = 64'h0; // @[rename-stage.scala:160:7] wire [63:0] io_wakeups_1_bits_fflags_bits_uop_exc_cause = 64'h0; // @[rename-stage.scala:160:7] wire [63:0] io_wakeups_2_bits_data = 64'h0; // @[rename-stage.scala:160:7] wire [63:0] io_wakeups_2_bits_fflags_bits_uop_exc_cause = 64'h0; // @[rename-stage.scala:160:7] wire [6:0] io_wakeups_1_bits_fflags_bits_uop_uopc = 7'h0; // @[rename-stage.scala:160:7] wire [6:0] io_wakeups_2_bits_fflags_bits_uop_uopc = 7'h0; // @[rename-stage.scala:160:7] wire [31:0] io_wakeups_1_bits_fflags_bits_uop_inst = 32'h0; // @[rename-stage.scala:160:7] wire [31:0] io_wakeups_1_bits_fflags_bits_uop_debug_inst = 32'h0; // @[rename-stage.scala:160:7] wire [31:0] io_wakeups_2_bits_fflags_bits_uop_inst = 32'h0; // @[rename-stage.scala:160:7] wire [31:0] io_wakeups_2_bits_fflags_bits_uop_debug_inst = 32'h0; // @[rename-stage.scala:160:7] wire [39:0] io_wakeups_1_bits_fflags_bits_uop_debug_pc = 40'h0; // @[rename-stage.scala:160:7] wire [39:0] io_wakeups_2_bits_fflags_bits_uop_debug_pc = 40'h0; // @[rename-stage.scala:160:7] wire [9:0] io_wakeups_1_bits_fflags_bits_uop_fu_code = 10'h0; // @[rename-stage.scala:160:7] wire [9:0] io_wakeups_2_bits_fflags_bits_uop_fu_code = 10'h0; // @[rename-stage.scala:160:7] wire [7:0] io_wakeups_1_bits_fflags_bits_uop_br_mask = 8'h0; // @[rename-stage.scala:160:7] wire [7:0] io_wakeups_2_bits_fflags_bits_uop_br_mask = 8'h0; // @[rename-stage.scala:160:7] wire [19:0] 
io_wakeups_1_bits_fflags_bits_uop_imm_packed = 20'h0; // @[rename-stage.scala:160:7] wire [19:0] io_wakeups_2_bits_fflags_bits_uop_imm_packed = 20'h0; // @[rename-stage.scala:160:7] wire [51:0] io_debug_isprlist = 52'h0; // @[rename-stage.scala:160:7] wire _io_ren_stalls_0_T_2; // @[rename-stage.scala:339:60] wire ren1_fire_0 = io_dec_fire_0_0; // @[rename-stage.scala:100:29, :160:7] wire [6:0] ren1_uops_0_uopc = io_dec_uops_0_uopc_0; // @[rename-stage.scala:101:29, :160:7] wire [31:0] ren1_uops_0_inst = io_dec_uops_0_inst_0; // @[rename-stage.scala:101:29, :160:7] wire [31:0] ren1_uops_0_debug_inst = io_dec_uops_0_debug_inst_0; // @[rename-stage.scala:101:29, :160:7] wire ren1_uops_0_is_rvc = io_dec_uops_0_is_rvc_0; // @[rename-stage.scala:101:29, :160:7] wire [39:0] ren1_uops_0_debug_pc = io_dec_uops_0_debug_pc_0; // @[rename-stage.scala:101:29, :160:7] wire [2:0] ren1_uops_0_iq_type = io_dec_uops_0_iq_type_0; // @[rename-stage.scala:101:29, :160:7] wire [9:0] ren1_uops_0_fu_code = io_dec_uops_0_fu_code_0; // @[rename-stage.scala:101:29, :160:7] wire ren1_uops_0_is_br = io_dec_uops_0_is_br_0; // @[rename-stage.scala:101:29, :160:7] wire ren1_uops_0_is_jalr = io_dec_uops_0_is_jalr_0; // @[rename-stage.scala:101:29, :160:7] wire ren1_uops_0_is_jal = io_dec_uops_0_is_jal_0; // @[rename-stage.scala:101:29, :160:7] wire ren1_uops_0_is_sfb = io_dec_uops_0_is_sfb_0; // @[rename-stage.scala:101:29, :160:7] wire [7:0] ren1_uops_0_br_mask = io_dec_uops_0_br_mask_0; // @[rename-stage.scala:101:29, :160:7] wire [2:0] ren1_uops_0_br_tag = io_dec_uops_0_br_tag_0; // @[rename-stage.scala:101:29, :160:7] wire [3:0] ren1_uops_0_ftq_idx = io_dec_uops_0_ftq_idx_0; // @[rename-stage.scala:101:29, :160:7] wire ren1_uops_0_edge_inst = io_dec_uops_0_edge_inst_0; // @[rename-stage.scala:101:29, :160:7] wire [5:0] ren1_uops_0_pc_lob = io_dec_uops_0_pc_lob_0; // @[rename-stage.scala:101:29, :160:7] wire ren1_uops_0_taken = io_dec_uops_0_taken_0; // @[rename-stage.scala:101:29, :160:7] wire [19:0] ren1_uops_0_imm_packed = io_dec_uops_0_imm_packed_0; // @[rename-stage.scala:101:29, :160:7] wire ren1_uops_0_exception = io_dec_uops_0_exception_0; // @[rename-stage.scala:101:29, :160:7] wire [63:0] ren1_uops_0_exc_cause = io_dec_uops_0_exc_cause_0; // @[rename-stage.scala:101:29, :160:7] wire ren1_uops_0_bypassable = io_dec_uops_0_bypassable_0; // @[rename-stage.scala:101:29, :160:7] wire [4:0] ren1_uops_0_mem_cmd = io_dec_uops_0_mem_cmd_0; // @[rename-stage.scala:101:29, :160:7] wire [1:0] ren1_uops_0_mem_size = io_dec_uops_0_mem_size_0; // @[rename-stage.scala:101:29, :160:7] wire ren1_uops_0_mem_signed = io_dec_uops_0_mem_signed_0; // @[rename-stage.scala:101:29, :160:7] wire ren1_uops_0_is_fence = io_dec_uops_0_is_fence_0; // @[rename-stage.scala:101:29, :160:7] wire ren1_uops_0_is_fencei = io_dec_uops_0_is_fencei_0; // @[rename-stage.scala:101:29, :160:7] wire ren1_uops_0_is_amo = io_dec_uops_0_is_amo_0; // @[rename-stage.scala:101:29, :160:7] wire ren1_uops_0_uses_ldq = io_dec_uops_0_uses_ldq_0; // @[rename-stage.scala:101:29, :160:7] wire ren1_uops_0_uses_stq = io_dec_uops_0_uses_stq_0; // @[rename-stage.scala:101:29, :160:7] wire ren1_uops_0_is_sys_pc2epc = io_dec_uops_0_is_sys_pc2epc_0; // @[rename-stage.scala:101:29, :160:7] wire ren1_uops_0_is_unique = io_dec_uops_0_is_unique_0; // @[rename-stage.scala:101:29, :160:7] wire ren1_uops_0_flush_on_commit = io_dec_uops_0_flush_on_commit_0; // @[rename-stage.scala:101:29, :160:7] wire [5:0] ren1_uops_0_ldst = io_dec_uops_0_ldst_0; // @[rename-stage.scala:101:29, 
:160:7] wire [5:0] ren1_uops_0_lrs1 = io_dec_uops_0_lrs1_0; // @[rename-stage.scala:101:29, :160:7] wire [5:0] ren1_uops_0_lrs2 = io_dec_uops_0_lrs2_0; // @[rename-stage.scala:101:29, :160:7] wire [5:0] ren1_uops_0_lrs3 = io_dec_uops_0_lrs3_0; // @[rename-stage.scala:101:29, :160:7] wire ren1_uops_0_ldst_val = io_dec_uops_0_ldst_val_0; // @[rename-stage.scala:101:29, :160:7] wire [1:0] ren1_uops_0_dst_rtype = io_dec_uops_0_dst_rtype_0; // @[rename-stage.scala:101:29, :160:7] wire [1:0] ren1_uops_0_lrs1_rtype = io_dec_uops_0_lrs1_rtype_0; // @[rename-stage.scala:101:29, :160:7] wire [1:0] ren1_uops_0_lrs2_rtype = io_dec_uops_0_lrs2_rtype_0; // @[rename-stage.scala:101:29, :160:7] wire ren1_uops_0_frs3_en = io_dec_uops_0_frs3_en_0; // @[rename-stage.scala:101:29, :160:7] wire ren1_uops_0_fp_val = io_dec_uops_0_fp_val_0; // @[rename-stage.scala:101:29, :160:7] wire ren1_uops_0_fp_single = io_dec_uops_0_fp_single_0; // @[rename-stage.scala:101:29, :160:7] wire ren1_uops_0_xcpt_pf_if = io_dec_uops_0_xcpt_pf_if_0; // @[rename-stage.scala:101:29, :160:7] wire ren1_uops_0_xcpt_ae_if = io_dec_uops_0_xcpt_ae_if_0; // @[rename-stage.scala:101:29, :160:7] wire ren1_uops_0_bp_debug_if = io_dec_uops_0_bp_debug_if_0; // @[rename-stage.scala:101:29, :160:7] wire ren1_uops_0_bp_xcpt_if = io_dec_uops_0_bp_xcpt_if_0; // @[rename-stage.scala:101:29, :160:7] wire [1:0] ren1_uops_0_debug_fsrc = io_dec_uops_0_debug_fsrc_0; // @[rename-stage.scala:101:29, :160:7] wire ren2_valids_0; // @[rename-stage.scala:107:29] wire [6:0] io_ren2_uops_0_newuop_uopc; // @[util.scala:73:26] wire [31:0] io_ren2_uops_0_newuop_inst; // @[util.scala:73:26] wire [31:0] io_ren2_uops_0_newuop_debug_inst; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_is_rvc; // @[util.scala:73:26] wire [39:0] io_ren2_uops_0_newuop_debug_pc; // @[util.scala:73:26] wire [2:0] io_ren2_uops_0_newuop_iq_type; // @[util.scala:73:26] wire [9:0] io_ren2_uops_0_newuop_fu_code; // @[util.scala:73:26] wire [3:0] io_ren2_uops_0_newuop_ctrl_br_type; // @[util.scala:73:26] wire [1:0] io_ren2_uops_0_newuop_ctrl_op1_sel; // @[util.scala:73:26] wire [2:0] io_ren2_uops_0_newuop_ctrl_op2_sel; // @[util.scala:73:26] wire [2:0] io_ren2_uops_0_newuop_ctrl_imm_sel; // @[util.scala:73:26] wire [4:0] io_ren2_uops_0_newuop_ctrl_op_fcn; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_ctrl_fcn_dw; // @[util.scala:73:26] wire [2:0] io_ren2_uops_0_newuop_ctrl_csr_cmd; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_ctrl_is_load; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_ctrl_is_sta; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_ctrl_is_std; // @[util.scala:73:26] wire [1:0] io_ren2_uops_0_newuop_iw_state; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_iw_p1_poisoned; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_iw_p2_poisoned; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_is_br; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_is_jalr; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_is_jal; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_is_sfb; // @[util.scala:73:26] wire [7:0] io_ren2_uops_0_newuop_br_mask; // @[util.scala:73:26] wire [2:0] io_ren2_uops_0_newuop_br_tag; // @[util.scala:73:26] wire [3:0] io_ren2_uops_0_newuop_ftq_idx; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_edge_inst; // @[util.scala:73:26] wire [5:0] io_ren2_uops_0_newuop_pc_lob; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_taken; // @[util.scala:73:26] wire [19:0] io_ren2_uops_0_newuop_imm_packed; // @[util.scala:73:26] wire [11:0] 
io_ren2_uops_0_newuop_csr_addr; // @[util.scala:73:26] wire [4:0] io_ren2_uops_0_newuop_rob_idx; // @[util.scala:73:26] wire [2:0] io_ren2_uops_0_newuop_ldq_idx; // @[util.scala:73:26] wire [2:0] io_ren2_uops_0_newuop_stq_idx; // @[util.scala:73:26] wire [1:0] io_ren2_uops_0_newuop_rxq_idx; // @[util.scala:73:26] wire [5:0] io_ren2_uops_0_newuop_pdst; // @[util.scala:73:26] wire [5:0] io_ren2_uops_0_newuop_prs1; // @[util.scala:73:26] wire [5:0] io_ren2_uops_0_newuop_prs2; // @[util.scala:73:26] wire [3:0] io_ren2_uops_0_newuop_ppred; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_prs1_busy; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_prs2_busy; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_ppred_busy; // @[util.scala:73:26] wire [5:0] io_ren2_uops_0_newuop_stale_pdst; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_exception; // @[util.scala:73:26] wire [63:0] io_ren2_uops_0_newuop_exc_cause; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_bypassable; // @[util.scala:73:26] wire [4:0] io_ren2_uops_0_newuop_mem_cmd; // @[util.scala:73:26] wire [1:0] io_ren2_uops_0_newuop_mem_size; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_mem_signed; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_is_fence; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_is_fencei; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_is_amo; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_uses_ldq; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_uses_stq; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_is_sys_pc2epc; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_is_unique; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_flush_on_commit; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_ldst_is_rs1; // @[util.scala:73:26] wire [5:0] io_ren2_uops_0_newuop_ldst; // @[util.scala:73:26] wire [5:0] io_ren2_uops_0_newuop_lrs1; // @[util.scala:73:26] wire [5:0] io_ren2_uops_0_newuop_lrs2; // @[util.scala:73:26] wire [5:0] io_ren2_uops_0_newuop_lrs3; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_ldst_val; // @[util.scala:73:26] wire [1:0] io_ren2_uops_0_newuop_dst_rtype; // @[util.scala:73:26] wire [1:0] io_ren2_uops_0_newuop_lrs1_rtype; // @[util.scala:73:26] wire [1:0] io_ren2_uops_0_newuop_lrs2_rtype; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_frs3_en; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_fp_val; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_fp_single; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_xcpt_pf_if; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_xcpt_ae_if; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_xcpt_ma_if; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_bp_debug_if; // @[util.scala:73:26] wire io_ren2_uops_0_newuop_bp_xcpt_if; // @[util.scala:73:26] wire [1:0] io_ren2_uops_0_newuop_debug_fsrc; // @[util.scala:73:26] wire [1:0] io_ren2_uops_0_newuop_debug_tsrc; // @[util.scala:73:26] wire io_ren_stalls_0_0; // @[rename-stage.scala:160:7] wire io_ren2_mask_0_0; // @[rename-stage.scala:160:7] wire [3:0] io_ren2_uops_0_ctrl_br_type_0; // @[rename-stage.scala:160:7] wire [1:0] io_ren2_uops_0_ctrl_op1_sel_0; // @[rename-stage.scala:160:7] wire [2:0] io_ren2_uops_0_ctrl_op2_sel_0; // @[rename-stage.scala:160:7] wire [2:0] io_ren2_uops_0_ctrl_imm_sel_0; // @[rename-stage.scala:160:7] wire [4:0] io_ren2_uops_0_ctrl_op_fcn_0; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_ctrl_fcn_dw_0; // @[rename-stage.scala:160:7] wire [2:0] io_ren2_uops_0_ctrl_csr_cmd_0; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_ctrl_is_load_0; // 
@[rename-stage.scala:160:7] wire io_ren2_uops_0_ctrl_is_sta_0; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_ctrl_is_std_0; // @[rename-stage.scala:160:7] wire [6:0] io_ren2_uops_0_uopc_0; // @[rename-stage.scala:160:7] wire [31:0] io_ren2_uops_0_inst_0; // @[rename-stage.scala:160:7] wire [31:0] io_ren2_uops_0_debug_inst_0; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_is_rvc_0; // @[rename-stage.scala:160:7] wire [39:0] io_ren2_uops_0_debug_pc_0; // @[rename-stage.scala:160:7] wire [2:0] io_ren2_uops_0_iq_type_0; // @[rename-stage.scala:160:7] wire [9:0] io_ren2_uops_0_fu_code_0; // @[rename-stage.scala:160:7] wire [1:0] io_ren2_uops_0_iw_state_0; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_iw_p1_poisoned_0; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_iw_p2_poisoned_0; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_is_br_0; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_is_jalr_0; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_is_jal_0; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_is_sfb_0; // @[rename-stage.scala:160:7] wire [7:0] io_ren2_uops_0_br_mask_0; // @[rename-stage.scala:160:7] wire [2:0] io_ren2_uops_0_br_tag_0; // @[rename-stage.scala:160:7] wire [3:0] io_ren2_uops_0_ftq_idx_0; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_edge_inst_0; // @[rename-stage.scala:160:7] wire [5:0] io_ren2_uops_0_pc_lob_0; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_taken_0; // @[rename-stage.scala:160:7] wire [19:0] io_ren2_uops_0_imm_packed_0; // @[rename-stage.scala:160:7] wire [11:0] io_ren2_uops_0_csr_addr_0; // @[rename-stage.scala:160:7] wire [4:0] io_ren2_uops_0_rob_idx; // @[rename-stage.scala:160:7] wire [2:0] io_ren2_uops_0_ldq_idx; // @[rename-stage.scala:160:7] wire [2:0] io_ren2_uops_0_stq_idx; // @[rename-stage.scala:160:7] wire [1:0] io_ren2_uops_0_rxq_idx_0; // @[rename-stage.scala:160:7] wire [5:0] io_ren2_uops_0_pdst_0; // @[rename-stage.scala:160:7] wire [5:0] io_ren2_uops_0_prs1_0; // @[rename-stage.scala:160:7] wire [5:0] io_ren2_uops_0_prs2_0; // @[rename-stage.scala:160:7] wire [3:0] io_ren2_uops_0_ppred; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_prs1_busy_0; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_prs2_busy_0; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_ppred_busy; // @[rename-stage.scala:160:7] wire [5:0] io_ren2_uops_0_stale_pdst_0; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_exception_0; // @[rename-stage.scala:160:7] wire [63:0] io_ren2_uops_0_exc_cause_0; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_bypassable_0; // @[rename-stage.scala:160:7] wire [4:0] io_ren2_uops_0_mem_cmd_0; // @[rename-stage.scala:160:7] wire [1:0] io_ren2_uops_0_mem_size_0; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_mem_signed_0; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_is_fence_0; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_is_fencei_0; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_is_amo_0; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_uses_ldq_0; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_uses_stq_0; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_is_sys_pc2epc_0; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_is_unique_0; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_flush_on_commit_0; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_ldst_is_rs1_0; // @[rename-stage.scala:160:7] wire [5:0] io_ren2_uops_0_ldst_0; // @[rename-stage.scala:160:7] wire [5:0] io_ren2_uops_0_lrs1_0; // @[rename-stage.scala:160:7] wire [5:0] io_ren2_uops_0_lrs2_0; // 
@[rename-stage.scala:160:7] wire [5:0] io_ren2_uops_0_lrs3_0; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_ldst_val_0; // @[rename-stage.scala:160:7] wire [1:0] io_ren2_uops_0_dst_rtype_0; // @[rename-stage.scala:160:7] wire [1:0] io_ren2_uops_0_lrs1_rtype_0; // @[rename-stage.scala:160:7] wire [1:0] io_ren2_uops_0_lrs2_rtype_0; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_frs3_en_0; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_fp_val_0; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_fp_single_0; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_xcpt_pf_if_0; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_xcpt_ae_if_0; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_xcpt_ma_if_0; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_bp_debug_if_0; // @[rename-stage.scala:160:7] wire io_ren2_uops_0_bp_xcpt_if_0; // @[rename-stage.scala:160:7] wire [1:0] io_ren2_uops_0_debug_fsrc_0; // @[rename-stage.scala:160:7] wire [1:0] io_ren2_uops_0_debug_tsrc_0; // @[rename-stage.scala:160:7] wire [51:0] io_debug_freelist; // @[rename-stage.scala:160:7] wire [51:0] io_debug_busytable; // @[rename-stage.scala:160:7] wire [5:0] map_reqs_0_ldst = ren1_uops_0_ldst; // @[rename-stage.scala:101:29, :252:24] wire [5:0] map_reqs_0_lrs1 = ren1_uops_0_lrs1; // @[rename-stage.scala:101:29, :252:24] wire [5:0] map_reqs_0_lrs2 = ren1_uops_0_lrs2; // @[rename-stage.scala:101:29, :252:24] wire [5:0] map_reqs_0_lrs3 = ren1_uops_0_lrs3; // @[rename-stage.scala:101:29, :252:24] wire [5:0] ren1_uops_0_prs1; // @[rename-stage.scala:101:29] wire [5:0] ren1_uops_0_prs2; // @[rename-stage.scala:101:29] wire [5:0] ren1_uops_0_stale_pdst; // @[rename-stage.scala:101:29] assign io_ren2_mask_0_0 = ren2_valids_0; // @[rename-stage.scala:107:29, :160:7] wire [6:0] bypassed_uop_uopc = ren2_uops_0_uopc; // @[rename-stage.scala:108:29, :341:28] wire [31:0] bypassed_uop_inst = ren2_uops_0_inst; // @[rename-stage.scala:108:29, :341:28] wire [31:0] bypassed_uop_debug_inst = ren2_uops_0_debug_inst; // @[rename-stage.scala:108:29, :341:28] wire bypassed_uop_is_rvc = ren2_uops_0_is_rvc; // @[rename-stage.scala:108:29, :341:28] wire [39:0] bypassed_uop_debug_pc = ren2_uops_0_debug_pc; // @[rename-stage.scala:108:29, :341:28] wire [2:0] bypassed_uop_iq_type = ren2_uops_0_iq_type; // @[rename-stage.scala:108:29, :341:28] wire [9:0] bypassed_uop_fu_code = ren2_uops_0_fu_code; // @[rename-stage.scala:108:29, :341:28] wire [3:0] bypassed_uop_ctrl_br_type = ren2_uops_0_ctrl_br_type; // @[rename-stage.scala:108:29, :341:28] wire [1:0] bypassed_uop_ctrl_op1_sel = ren2_uops_0_ctrl_op1_sel; // @[rename-stage.scala:108:29, :341:28] wire [2:0] bypassed_uop_ctrl_op2_sel = ren2_uops_0_ctrl_op2_sel; // @[rename-stage.scala:108:29, :341:28] wire [2:0] bypassed_uop_ctrl_imm_sel = ren2_uops_0_ctrl_imm_sel; // @[rename-stage.scala:108:29, :341:28] wire [4:0] bypassed_uop_ctrl_op_fcn = ren2_uops_0_ctrl_op_fcn; // @[rename-stage.scala:108:29, :341:28] wire bypassed_uop_ctrl_fcn_dw = ren2_uops_0_ctrl_fcn_dw; // @[rename-stage.scala:108:29, :341:28] wire [2:0] bypassed_uop_ctrl_csr_cmd = ren2_uops_0_ctrl_csr_cmd; // @[rename-stage.scala:108:29, :341:28] wire bypassed_uop_ctrl_is_load = ren2_uops_0_ctrl_is_load; // @[rename-stage.scala:108:29, :341:28] wire bypassed_uop_ctrl_is_sta = ren2_uops_0_ctrl_is_sta; // @[rename-stage.scala:108:29, :341:28] wire bypassed_uop_ctrl_is_std = ren2_uops_0_ctrl_is_std; // @[rename-stage.scala:108:29, :341:28] wire [1:0] bypassed_uop_iw_state = ren2_uops_0_iw_state; // @[rename-stage.scala:108:29, 
:341:28] wire bypassed_uop_iw_p1_poisoned = ren2_uops_0_iw_p1_poisoned; // @[rename-stage.scala:108:29, :341:28] wire bypassed_uop_iw_p2_poisoned = ren2_uops_0_iw_p2_poisoned; // @[rename-stage.scala:108:29, :341:28] wire bypassed_uop_is_br = ren2_uops_0_is_br; // @[rename-stage.scala:108:29, :341:28] wire bypassed_uop_is_jalr = ren2_uops_0_is_jalr; // @[rename-stage.scala:108:29, :341:28] wire bypassed_uop_is_jal = ren2_uops_0_is_jal; // @[rename-stage.scala:108:29, :341:28] wire bypassed_uop_is_sfb = ren2_uops_0_is_sfb; // @[rename-stage.scala:108:29, :341:28] wire [7:0] bypassed_uop_br_mask = ren2_uops_0_br_mask; // @[rename-stage.scala:108:29, :341:28] wire [2:0] ren2_br_tags_0_bits = ren2_uops_0_br_tag; // @[rename-stage.scala:108:29, :233:29] wire [2:0] bypassed_uop_br_tag = ren2_uops_0_br_tag; // @[rename-stage.scala:108:29, :341:28] wire [3:0] bypassed_uop_ftq_idx = ren2_uops_0_ftq_idx; // @[rename-stage.scala:108:29, :341:28] wire bypassed_uop_edge_inst = ren2_uops_0_edge_inst; // @[rename-stage.scala:108:29, :341:28] wire [5:0] bypassed_uop_pc_lob = ren2_uops_0_pc_lob; // @[rename-stage.scala:108:29, :341:28] wire bypassed_uop_taken = ren2_uops_0_taken; // @[rename-stage.scala:108:29, :341:28] wire [19:0] bypassed_uop_imm_packed = ren2_uops_0_imm_packed; // @[rename-stage.scala:108:29, :341:28] wire [11:0] bypassed_uop_csr_addr = ren2_uops_0_csr_addr; // @[rename-stage.scala:108:29, :341:28] wire [4:0] bypassed_uop_rob_idx = ren2_uops_0_rob_idx; // @[rename-stage.scala:108:29, :341:28] wire [2:0] bypassed_uop_ldq_idx = ren2_uops_0_ldq_idx; // @[rename-stage.scala:108:29, :341:28] wire [2:0] bypassed_uop_stq_idx = ren2_uops_0_stq_idx; // @[rename-stage.scala:108:29, :341:28] wire [5:0] _ren2_uops_0_pdst_T_2; // @[rename-stage.scala:306:20] wire [1:0] bypassed_uop_rxq_idx = ren2_uops_0_rxq_idx; // @[rename-stage.scala:108:29, :341:28] wire [5:0] bypassed_uop_pdst = ren2_uops_0_pdst; // @[rename-stage.scala:108:29, :341:28] wire [5:0] bypassed_uop_prs1 = ren2_uops_0_prs1; // @[rename-stage.scala:108:29, :341:28] wire [5:0] bypassed_uop_prs2 = ren2_uops_0_prs2; // @[rename-stage.scala:108:29, :341:28] wire _ren2_uops_0_prs1_busy_T_1; // @[rename-stage.scala:323:47] wire [3:0] bypassed_uop_ppred = ren2_uops_0_ppred; // @[rename-stage.scala:108:29, :341:28] wire _ren2_uops_0_prs2_busy_T_1; // @[rename-stage.scala:324:47] wire bypassed_uop_prs1_busy = ren2_uops_0_prs1_busy; // @[rename-stage.scala:108:29, :341:28] wire bypassed_uop_prs2_busy = ren2_uops_0_prs2_busy; // @[rename-stage.scala:108:29, :341:28] wire bypassed_uop_ppred_busy = ren2_uops_0_ppred_busy; // @[rename-stage.scala:108:29, :341:28] wire [5:0] bypassed_uop_stale_pdst = ren2_uops_0_stale_pdst; // @[rename-stage.scala:108:29, :341:28] wire bypassed_uop_exception = ren2_uops_0_exception; // @[rename-stage.scala:108:29, :341:28] wire [63:0] bypassed_uop_exc_cause = ren2_uops_0_exc_cause; // @[rename-stage.scala:108:29, :341:28] wire bypassed_uop_bypassable = ren2_uops_0_bypassable; // @[rename-stage.scala:108:29, :341:28] wire [4:0] bypassed_uop_mem_cmd = ren2_uops_0_mem_cmd; // @[rename-stage.scala:108:29, :341:28] wire [1:0] bypassed_uop_mem_size = ren2_uops_0_mem_size; // @[rename-stage.scala:108:29, :341:28] wire bypassed_uop_mem_signed = ren2_uops_0_mem_signed; // @[rename-stage.scala:108:29, :341:28] wire bypassed_uop_is_fence = ren2_uops_0_is_fence; // @[rename-stage.scala:108:29, :341:28] wire bypassed_uop_is_fencei = ren2_uops_0_is_fencei; // @[rename-stage.scala:108:29, :341:28] wire bypassed_uop_is_amo = 
ren2_uops_0_is_amo; // @[rename-stage.scala:108:29, :341:28] wire bypassed_uop_uses_ldq = ren2_uops_0_uses_ldq; // @[rename-stage.scala:108:29, :341:28] wire bypassed_uop_uses_stq = ren2_uops_0_uses_stq; // @[rename-stage.scala:108:29, :341:28] wire bypassed_uop_is_sys_pc2epc = ren2_uops_0_is_sys_pc2epc; // @[rename-stage.scala:108:29, :341:28] wire bypassed_uop_is_unique = ren2_uops_0_is_unique; // @[rename-stage.scala:108:29, :341:28] wire bypassed_uop_flush_on_commit = ren2_uops_0_flush_on_commit; // @[rename-stage.scala:108:29, :341:28] wire bypassed_uop_ldst_is_rs1 = ren2_uops_0_ldst_is_rs1; // @[rename-stage.scala:108:29, :341:28] wire [5:0] bypassed_uop_ldst = ren2_uops_0_ldst; // @[rename-stage.scala:108:29, :341:28] wire [5:0] bypassed_uop_lrs1 = ren2_uops_0_lrs1; // @[rename-stage.scala:108:29, :341:28] wire [5:0] bypassed_uop_lrs2 = ren2_uops_0_lrs2; // @[rename-stage.scala:108:29, :341:28] wire [5:0] bypassed_uop_lrs3 = ren2_uops_0_lrs3; // @[rename-stage.scala:108:29, :341:28] wire bypassed_uop_ldst_val = ren2_uops_0_ldst_val; // @[rename-stage.scala:108:29, :341:28] wire [1:0] bypassed_uop_dst_rtype = ren2_uops_0_dst_rtype; // @[rename-stage.scala:108:29, :341:28] wire [1:0] bypassed_uop_lrs1_rtype = ren2_uops_0_lrs1_rtype; // @[rename-stage.scala:108:29, :341:28] wire [1:0] bypassed_uop_lrs2_rtype = ren2_uops_0_lrs2_rtype; // @[rename-stage.scala:108:29, :341:28] wire bypassed_uop_frs3_en = ren2_uops_0_frs3_en; // @[rename-stage.scala:108:29, :341:28] wire bypassed_uop_fp_val = ren2_uops_0_fp_val; // @[rename-stage.scala:108:29, :341:28] wire bypassed_uop_fp_single = ren2_uops_0_fp_single; // @[rename-stage.scala:108:29, :341:28] wire bypassed_uop_xcpt_pf_if = ren2_uops_0_xcpt_pf_if; // @[rename-stage.scala:108:29, :341:28] wire bypassed_uop_xcpt_ae_if = ren2_uops_0_xcpt_ae_if; // @[rename-stage.scala:108:29, :341:28] wire bypassed_uop_xcpt_ma_if = ren2_uops_0_xcpt_ma_if; // @[rename-stage.scala:108:29, :341:28] wire bypassed_uop_bp_debug_if = ren2_uops_0_bp_debug_if; // @[rename-stage.scala:108:29, :341:28] wire bypassed_uop_bp_xcpt_if = ren2_uops_0_bp_xcpt_if; // @[rename-stage.scala:108:29, :341:28] wire [1:0] bypassed_uop_debug_fsrc = ren2_uops_0_debug_fsrc; // @[rename-stage.scala:108:29, :341:28] wire [1:0] bypassed_uop_debug_tsrc = ren2_uops_0_debug_tsrc; // @[rename-stage.scala:108:29, :341:28] wire _ren2_alloc_reqs_0_T_2; // @[rename-stage.scala:240:88] wire ren2_alloc_reqs_0; // @[rename-stage.scala:109:29] reg r_valid; // @[rename-stage.scala:121:27] assign ren2_valids_0 = r_valid; // @[rename-stage.scala:107:29, :121:27] reg [6:0] r_uop_uopc; // @[rename-stage.scala:122:23] assign ren2_uops_0_uopc = r_uop_uopc; // @[rename-stage.scala:108:29, :122:23] reg [31:0] r_uop_inst; // @[rename-stage.scala:122:23] assign ren2_uops_0_inst = r_uop_inst; // @[rename-stage.scala:108:29, :122:23] reg [31:0] r_uop_debug_inst; // @[rename-stage.scala:122:23] assign ren2_uops_0_debug_inst = r_uop_debug_inst; // @[rename-stage.scala:108:29, :122:23] reg r_uop_is_rvc; // @[rename-stage.scala:122:23] assign ren2_uops_0_is_rvc = r_uop_is_rvc; // @[rename-stage.scala:108:29, :122:23] reg [39:0] r_uop_debug_pc; // @[rename-stage.scala:122:23] assign ren2_uops_0_debug_pc = r_uop_debug_pc; // @[rename-stage.scala:108:29, :122:23] reg [2:0] r_uop_iq_type; // @[rename-stage.scala:122:23] assign ren2_uops_0_iq_type = r_uop_iq_type; // @[rename-stage.scala:108:29, :122:23] reg [9:0] r_uop_fu_code; // @[rename-stage.scala:122:23] assign ren2_uops_0_fu_code = r_uop_fu_code; // 
@[rename-stage.scala:108:29, :122:23] reg [3:0] r_uop_ctrl_br_type; // @[rename-stage.scala:122:23] assign ren2_uops_0_ctrl_br_type = r_uop_ctrl_br_type; // @[rename-stage.scala:108:29, :122:23] reg [1:0] r_uop_ctrl_op1_sel; // @[rename-stage.scala:122:23] assign ren2_uops_0_ctrl_op1_sel = r_uop_ctrl_op1_sel; // @[rename-stage.scala:108:29, :122:23] reg [2:0] r_uop_ctrl_op2_sel; // @[rename-stage.scala:122:23] assign ren2_uops_0_ctrl_op2_sel = r_uop_ctrl_op2_sel; // @[rename-stage.scala:108:29, :122:23] reg [2:0] r_uop_ctrl_imm_sel; // @[rename-stage.scala:122:23] assign ren2_uops_0_ctrl_imm_sel = r_uop_ctrl_imm_sel; // @[rename-stage.scala:108:29, :122:23] reg [4:0] r_uop_ctrl_op_fcn; // @[rename-stage.scala:122:23] assign ren2_uops_0_ctrl_op_fcn = r_uop_ctrl_op_fcn; // @[rename-stage.scala:108:29, :122:23] reg r_uop_ctrl_fcn_dw; // @[rename-stage.scala:122:23] assign ren2_uops_0_ctrl_fcn_dw = r_uop_ctrl_fcn_dw; // @[rename-stage.scala:108:29, :122:23] reg [2:0] r_uop_ctrl_csr_cmd; // @[rename-stage.scala:122:23] assign ren2_uops_0_ctrl_csr_cmd = r_uop_ctrl_csr_cmd; // @[rename-stage.scala:108:29, :122:23] reg r_uop_ctrl_is_load; // @[rename-stage.scala:122:23] assign ren2_uops_0_ctrl_is_load = r_uop_ctrl_is_load; // @[rename-stage.scala:108:29, :122:23] reg r_uop_ctrl_is_sta; // @[rename-stage.scala:122:23] assign ren2_uops_0_ctrl_is_sta = r_uop_ctrl_is_sta; // @[rename-stage.scala:108:29, :122:23] reg r_uop_ctrl_is_std; // @[rename-stage.scala:122:23] assign ren2_uops_0_ctrl_is_std = r_uop_ctrl_is_std; // @[rename-stage.scala:108:29, :122:23] reg [1:0] r_uop_iw_state; // @[rename-stage.scala:122:23] assign ren2_uops_0_iw_state = r_uop_iw_state; // @[rename-stage.scala:108:29, :122:23] reg r_uop_iw_p1_poisoned; // @[rename-stage.scala:122:23] assign ren2_uops_0_iw_p1_poisoned = r_uop_iw_p1_poisoned; // @[rename-stage.scala:108:29, :122:23] reg r_uop_iw_p2_poisoned; // @[rename-stage.scala:122:23] assign ren2_uops_0_iw_p2_poisoned = r_uop_iw_p2_poisoned; // @[rename-stage.scala:108:29, :122:23] reg r_uop_is_br; // @[rename-stage.scala:122:23] assign ren2_uops_0_is_br = r_uop_is_br; // @[rename-stage.scala:108:29, :122:23] reg r_uop_is_jalr; // @[rename-stage.scala:122:23] assign ren2_uops_0_is_jalr = r_uop_is_jalr; // @[rename-stage.scala:108:29, :122:23] reg r_uop_is_jal; // @[rename-stage.scala:122:23] assign ren2_uops_0_is_jal = r_uop_is_jal; // @[rename-stage.scala:108:29, :122:23] reg r_uop_is_sfb; // @[rename-stage.scala:122:23] assign ren2_uops_0_is_sfb = r_uop_is_sfb; // @[rename-stage.scala:108:29, :122:23] reg [7:0] r_uop_br_mask; // @[rename-stage.scala:122:23] assign ren2_uops_0_br_mask = r_uop_br_mask; // @[rename-stage.scala:108:29, :122:23] reg [2:0] r_uop_br_tag; // @[rename-stage.scala:122:23] assign ren2_uops_0_br_tag = r_uop_br_tag; // @[rename-stage.scala:108:29, :122:23] reg [3:0] r_uop_ftq_idx; // @[rename-stage.scala:122:23] assign ren2_uops_0_ftq_idx = r_uop_ftq_idx; // @[rename-stage.scala:108:29, :122:23] reg r_uop_edge_inst; // @[rename-stage.scala:122:23] assign ren2_uops_0_edge_inst = r_uop_edge_inst; // @[rename-stage.scala:108:29, :122:23] reg [5:0] r_uop_pc_lob; // @[rename-stage.scala:122:23] assign ren2_uops_0_pc_lob = r_uop_pc_lob; // @[rename-stage.scala:108:29, :122:23] reg r_uop_taken; // @[rename-stage.scala:122:23] assign ren2_uops_0_taken = r_uop_taken; // @[rename-stage.scala:108:29, :122:23] reg [19:0] r_uop_imm_packed; // @[rename-stage.scala:122:23] assign ren2_uops_0_imm_packed = r_uop_imm_packed; // @[rename-stage.scala:108:29, :122:23] reg 
[11:0] r_uop_csr_addr; // @[rename-stage.scala:122:23] assign ren2_uops_0_csr_addr = r_uop_csr_addr; // @[rename-stage.scala:108:29, :122:23] reg [4:0] r_uop_rob_idx; // @[rename-stage.scala:122:23] assign ren2_uops_0_rob_idx = r_uop_rob_idx; // @[rename-stage.scala:108:29, :122:23] reg [2:0] r_uop_ldq_idx; // @[rename-stage.scala:122:23] assign ren2_uops_0_ldq_idx = r_uop_ldq_idx; // @[rename-stage.scala:108:29, :122:23] reg [2:0] r_uop_stq_idx; // @[rename-stage.scala:122:23] assign ren2_uops_0_stq_idx = r_uop_stq_idx; // @[rename-stage.scala:108:29, :122:23] reg [1:0] r_uop_rxq_idx; // @[rename-stage.scala:122:23] assign ren2_uops_0_rxq_idx = r_uop_rxq_idx; // @[rename-stage.scala:108:29, :122:23] reg [5:0] r_uop_pdst; // @[rename-stage.scala:122:23] reg [5:0] r_uop_prs1; // @[rename-stage.scala:122:23] assign ren2_uops_0_prs1 = r_uop_prs1; // @[rename-stage.scala:108:29, :122:23] reg [5:0] r_uop_prs2; // @[rename-stage.scala:122:23] assign ren2_uops_0_prs2 = r_uop_prs2; // @[rename-stage.scala:108:29, :122:23] reg [3:0] r_uop_ppred; // @[rename-stage.scala:122:23] assign ren2_uops_0_ppred = r_uop_ppred; // @[rename-stage.scala:108:29, :122:23] reg r_uop_prs1_busy; // @[rename-stage.scala:122:23] reg r_uop_prs2_busy; // @[rename-stage.scala:122:23] reg r_uop_ppred_busy; // @[rename-stage.scala:122:23] assign ren2_uops_0_ppred_busy = r_uop_ppred_busy; // @[rename-stage.scala:108:29, :122:23] reg [5:0] r_uop_stale_pdst; // @[rename-stage.scala:122:23] assign ren2_uops_0_stale_pdst = r_uop_stale_pdst; // @[rename-stage.scala:108:29, :122:23] reg r_uop_exception; // @[rename-stage.scala:122:23] assign ren2_uops_0_exception = r_uop_exception; // @[rename-stage.scala:108:29, :122:23] reg [63:0] r_uop_exc_cause; // @[rename-stage.scala:122:23] assign ren2_uops_0_exc_cause = r_uop_exc_cause; // @[rename-stage.scala:108:29, :122:23] reg r_uop_bypassable; // @[rename-stage.scala:122:23] assign ren2_uops_0_bypassable = r_uop_bypassable; // @[rename-stage.scala:108:29, :122:23] reg [4:0] r_uop_mem_cmd; // @[rename-stage.scala:122:23] assign ren2_uops_0_mem_cmd = r_uop_mem_cmd; // @[rename-stage.scala:108:29, :122:23] reg [1:0] r_uop_mem_size; // @[rename-stage.scala:122:23] assign ren2_uops_0_mem_size = r_uop_mem_size; // @[rename-stage.scala:108:29, :122:23] reg r_uop_mem_signed; // @[rename-stage.scala:122:23] assign ren2_uops_0_mem_signed = r_uop_mem_signed; // @[rename-stage.scala:108:29, :122:23] reg r_uop_is_fence; // @[rename-stage.scala:122:23] assign ren2_uops_0_is_fence = r_uop_is_fence; // @[rename-stage.scala:108:29, :122:23] reg r_uop_is_fencei; // @[rename-stage.scala:122:23] assign ren2_uops_0_is_fencei = r_uop_is_fencei; // @[rename-stage.scala:108:29, :122:23] reg r_uop_is_amo; // @[rename-stage.scala:122:23] assign ren2_uops_0_is_amo = r_uop_is_amo; // @[rename-stage.scala:108:29, :122:23] reg r_uop_uses_ldq; // @[rename-stage.scala:122:23] assign ren2_uops_0_uses_ldq = r_uop_uses_ldq; // @[rename-stage.scala:108:29, :122:23] reg r_uop_uses_stq; // @[rename-stage.scala:122:23] assign ren2_uops_0_uses_stq = r_uop_uses_stq; // @[rename-stage.scala:108:29, :122:23] reg r_uop_is_sys_pc2epc; // @[rename-stage.scala:122:23] assign ren2_uops_0_is_sys_pc2epc = r_uop_is_sys_pc2epc; // @[rename-stage.scala:108:29, :122:23] reg r_uop_is_unique; // @[rename-stage.scala:122:23] assign ren2_uops_0_is_unique = r_uop_is_unique; // @[rename-stage.scala:108:29, :122:23] reg r_uop_flush_on_commit; // @[rename-stage.scala:122:23] assign ren2_uops_0_flush_on_commit = r_uop_flush_on_commit; // 
@[rename-stage.scala:108:29, :122:23] reg r_uop_ldst_is_rs1; // @[rename-stage.scala:122:23] assign ren2_uops_0_ldst_is_rs1 = r_uop_ldst_is_rs1; // @[rename-stage.scala:108:29, :122:23] reg [5:0] r_uop_ldst; // @[rename-stage.scala:122:23] assign ren2_uops_0_ldst = r_uop_ldst; // @[rename-stage.scala:108:29, :122:23] reg [5:0] r_uop_lrs1; // @[rename-stage.scala:122:23] assign ren2_uops_0_lrs1 = r_uop_lrs1; // @[rename-stage.scala:108:29, :122:23] reg [5:0] r_uop_lrs2; // @[rename-stage.scala:122:23] assign ren2_uops_0_lrs2 = r_uop_lrs2; // @[rename-stage.scala:108:29, :122:23] reg [5:0] r_uop_lrs3; // @[rename-stage.scala:122:23] assign ren2_uops_0_lrs3 = r_uop_lrs3; // @[rename-stage.scala:108:29, :122:23] reg r_uop_ldst_val; // @[rename-stage.scala:122:23] assign ren2_uops_0_ldst_val = r_uop_ldst_val; // @[rename-stage.scala:108:29, :122:23] reg [1:0] r_uop_dst_rtype; // @[rename-stage.scala:122:23] assign ren2_uops_0_dst_rtype = r_uop_dst_rtype; // @[rename-stage.scala:108:29, :122:23] reg [1:0] r_uop_lrs1_rtype; // @[rename-stage.scala:122:23] assign ren2_uops_0_lrs1_rtype = r_uop_lrs1_rtype; // @[rename-stage.scala:108:29, :122:23] reg [1:0] r_uop_lrs2_rtype; // @[rename-stage.scala:122:23] assign ren2_uops_0_lrs2_rtype = r_uop_lrs2_rtype; // @[rename-stage.scala:108:29, :122:23] reg r_uop_frs3_en; // @[rename-stage.scala:122:23] assign ren2_uops_0_frs3_en = r_uop_frs3_en; // @[rename-stage.scala:108:29, :122:23] reg r_uop_fp_val; // @[rename-stage.scala:122:23] assign ren2_uops_0_fp_val = r_uop_fp_val; // @[rename-stage.scala:108:29, :122:23] reg r_uop_fp_single; // @[rename-stage.scala:122:23] assign ren2_uops_0_fp_single = r_uop_fp_single; // @[rename-stage.scala:108:29, :122:23] reg r_uop_xcpt_pf_if; // @[rename-stage.scala:122:23] assign ren2_uops_0_xcpt_pf_if = r_uop_xcpt_pf_if; // @[rename-stage.scala:108:29, :122:23] reg r_uop_xcpt_ae_if; // @[rename-stage.scala:122:23] assign ren2_uops_0_xcpt_ae_if = r_uop_xcpt_ae_if; // @[rename-stage.scala:108:29, :122:23] reg r_uop_xcpt_ma_if; // @[rename-stage.scala:122:23] assign ren2_uops_0_xcpt_ma_if = r_uop_xcpt_ma_if; // @[rename-stage.scala:108:29, :122:23] reg r_uop_bp_debug_if; // @[rename-stage.scala:122:23] assign ren2_uops_0_bp_debug_if = r_uop_bp_debug_if; // @[rename-stage.scala:108:29, :122:23] reg r_uop_bp_xcpt_if; // @[rename-stage.scala:122:23] assign ren2_uops_0_bp_xcpt_if = r_uop_bp_xcpt_if; // @[rename-stage.scala:108:29, :122:23] reg [1:0] r_uop_debug_fsrc; // @[rename-stage.scala:122:23] assign ren2_uops_0_debug_fsrc = r_uop_debug_fsrc; // @[rename-stage.scala:108:29, :122:23] reg [1:0] r_uop_debug_tsrc; // @[rename-stage.scala:122:23] assign ren2_uops_0_debug_tsrc = r_uop_debug_tsrc; // @[rename-stage.scala:108:29, :122:23] wire [6:0] r_uop_bypassed_uop_uopc = next_uop_uopc; // @[rename-stage.scala:123:24, :174:28] wire [31:0] r_uop_bypassed_uop_inst = next_uop_inst; // @[rename-stage.scala:123:24, :174:28] wire [31:0] r_uop_bypassed_uop_debug_inst = next_uop_debug_inst; // @[rename-stage.scala:123:24, :174:28] wire r_uop_bypassed_uop_is_rvc = next_uop_is_rvc; // @[rename-stage.scala:123:24, :174:28] wire [39:0] r_uop_bypassed_uop_debug_pc = next_uop_debug_pc; // @[rename-stage.scala:123:24, :174:28] wire [2:0] r_uop_bypassed_uop_iq_type = next_uop_iq_type; // @[rename-stage.scala:123:24, :174:28] wire [9:0] r_uop_bypassed_uop_fu_code = next_uop_fu_code; // @[rename-stage.scala:123:24, :174:28] wire [3:0] r_uop_bypassed_uop_ctrl_br_type = next_uop_ctrl_br_type; // @[rename-stage.scala:123:24, :174:28] wire [1:0] 
r_uop_bypassed_uop_ctrl_op1_sel = next_uop_ctrl_op1_sel; // @[rename-stage.scala:123:24, :174:28] wire [2:0] r_uop_bypassed_uop_ctrl_op2_sel = next_uop_ctrl_op2_sel; // @[rename-stage.scala:123:24, :174:28] wire [2:0] r_uop_bypassed_uop_ctrl_imm_sel = next_uop_ctrl_imm_sel; // @[rename-stage.scala:123:24, :174:28] wire [4:0] r_uop_bypassed_uop_ctrl_op_fcn = next_uop_ctrl_op_fcn; // @[rename-stage.scala:123:24, :174:28] wire r_uop_bypassed_uop_ctrl_fcn_dw = next_uop_ctrl_fcn_dw; // @[rename-stage.scala:123:24, :174:28] wire [2:0] r_uop_bypassed_uop_ctrl_csr_cmd = next_uop_ctrl_csr_cmd; // @[rename-stage.scala:123:24, :174:28] wire r_uop_bypassed_uop_ctrl_is_load = next_uop_ctrl_is_load; // @[rename-stage.scala:123:24, :174:28] wire r_uop_bypassed_uop_ctrl_is_sta = next_uop_ctrl_is_sta; // @[rename-stage.scala:123:24, :174:28] wire r_uop_bypassed_uop_ctrl_is_std = next_uop_ctrl_is_std; // @[rename-stage.scala:123:24, :174:28] wire [1:0] r_uop_bypassed_uop_iw_state = next_uop_iw_state; // @[rename-stage.scala:123:24, :174:28] wire r_uop_bypassed_uop_iw_p1_poisoned = next_uop_iw_p1_poisoned; // @[rename-stage.scala:123:24, :174:28] wire r_uop_bypassed_uop_iw_p2_poisoned = next_uop_iw_p2_poisoned; // @[rename-stage.scala:123:24, :174:28] wire r_uop_bypassed_uop_is_br = next_uop_is_br; // @[rename-stage.scala:123:24, :174:28] wire r_uop_bypassed_uop_is_jalr = next_uop_is_jalr; // @[rename-stage.scala:123:24, :174:28] wire r_uop_bypassed_uop_is_jal = next_uop_is_jal; // @[rename-stage.scala:123:24, :174:28] wire r_uop_bypassed_uop_is_sfb = next_uop_is_sfb; // @[rename-stage.scala:123:24, :174:28] wire [7:0] r_uop_bypassed_uop_br_mask = next_uop_br_mask; // @[rename-stage.scala:123:24, :174:28] wire [2:0] r_uop_bypassed_uop_br_tag = next_uop_br_tag; // @[rename-stage.scala:123:24, :174:28] wire [3:0] r_uop_bypassed_uop_ftq_idx = next_uop_ftq_idx; // @[rename-stage.scala:123:24, :174:28] wire r_uop_bypassed_uop_edge_inst = next_uop_edge_inst; // @[rename-stage.scala:123:24, :174:28] wire [5:0] r_uop_bypassed_uop_pc_lob = next_uop_pc_lob; // @[rename-stage.scala:123:24, :174:28] wire r_uop_bypassed_uop_taken = next_uop_taken; // @[rename-stage.scala:123:24, :174:28] wire [19:0] r_uop_bypassed_uop_imm_packed = next_uop_imm_packed; // @[rename-stage.scala:123:24, :174:28] wire [11:0] r_uop_bypassed_uop_csr_addr = next_uop_csr_addr; // @[rename-stage.scala:123:24, :174:28] wire [4:0] r_uop_bypassed_uop_rob_idx = next_uop_rob_idx; // @[rename-stage.scala:123:24, :174:28] wire [2:0] r_uop_bypassed_uop_ldq_idx = next_uop_ldq_idx; // @[rename-stage.scala:123:24, :174:28] wire [2:0] r_uop_bypassed_uop_stq_idx = next_uop_stq_idx; // @[rename-stage.scala:123:24, :174:28] wire [1:0] r_uop_bypassed_uop_rxq_idx = next_uop_rxq_idx; // @[rename-stage.scala:123:24, :174:28] wire [5:0] r_uop_bypassed_uop_pdst = next_uop_pdst; // @[rename-stage.scala:123:24, :174:28] wire [3:0] r_uop_bypassed_uop_ppred = next_uop_ppred; // @[rename-stage.scala:123:24, :174:28] wire r_uop_bypassed_uop_ppred_busy = next_uop_ppred_busy; // @[rename-stage.scala:123:24, :174:28] wire r_uop_bypassed_uop_exception = next_uop_exception; // @[rename-stage.scala:123:24, :174:28] wire [63:0] r_uop_bypassed_uop_exc_cause = next_uop_exc_cause; // @[rename-stage.scala:123:24, :174:28] wire r_uop_bypassed_uop_bypassable = next_uop_bypassable; // @[rename-stage.scala:123:24, :174:28] wire [4:0] r_uop_bypassed_uop_mem_cmd = next_uop_mem_cmd; // @[rename-stage.scala:123:24, :174:28] wire [1:0] r_uop_bypassed_uop_mem_size = next_uop_mem_size; // 
@[rename-stage.scala:123:24, :174:28] wire r_uop_bypassed_uop_mem_signed = next_uop_mem_signed; // @[rename-stage.scala:123:24, :174:28] wire r_uop_bypassed_uop_is_fence = next_uop_is_fence; // @[rename-stage.scala:123:24, :174:28] wire r_uop_bypassed_uop_is_fencei = next_uop_is_fencei; // @[rename-stage.scala:123:24, :174:28] wire r_uop_bypassed_uop_is_amo = next_uop_is_amo; // @[rename-stage.scala:123:24, :174:28] wire r_uop_bypassed_uop_uses_ldq = next_uop_uses_ldq; // @[rename-stage.scala:123:24, :174:28] wire r_uop_bypassed_uop_uses_stq = next_uop_uses_stq; // @[rename-stage.scala:123:24, :174:28] wire r_uop_bypassed_uop_is_sys_pc2epc = next_uop_is_sys_pc2epc; // @[rename-stage.scala:123:24, :174:28] wire r_uop_bypassed_uop_is_unique = next_uop_is_unique; // @[rename-stage.scala:123:24, :174:28] wire r_uop_bypassed_uop_flush_on_commit = next_uop_flush_on_commit; // @[rename-stage.scala:123:24, :174:28] wire r_uop_bypassed_uop_ldst_is_rs1 = next_uop_ldst_is_rs1; // @[rename-stage.scala:123:24, :174:28] wire [5:0] r_uop_bypassed_uop_ldst = next_uop_ldst; // @[rename-stage.scala:123:24, :174:28] wire [5:0] r_uop_bypassed_uop_lrs1 = next_uop_lrs1; // @[rename-stage.scala:123:24, :174:28] wire [5:0] r_uop_bypassed_uop_lrs2 = next_uop_lrs2; // @[rename-stage.scala:123:24, :174:28] wire [5:0] r_uop_bypassed_uop_lrs3 = next_uop_lrs3; // @[rename-stage.scala:123:24, :174:28] wire r_uop_bypassed_uop_ldst_val = next_uop_ldst_val; // @[rename-stage.scala:123:24, :174:28] wire [1:0] r_uop_bypassed_uop_dst_rtype = next_uop_dst_rtype; // @[rename-stage.scala:123:24, :174:28] wire [1:0] r_uop_bypassed_uop_lrs1_rtype = next_uop_lrs1_rtype; // @[rename-stage.scala:123:24, :174:28] wire [1:0] r_uop_bypassed_uop_lrs2_rtype = next_uop_lrs2_rtype; // @[rename-stage.scala:123:24, :174:28] wire r_uop_bypassed_uop_frs3_en = next_uop_frs3_en; // @[rename-stage.scala:123:24, :174:28] wire r_uop_bypassed_uop_fp_val = next_uop_fp_val; // @[rename-stage.scala:123:24, :174:28] wire r_uop_bypassed_uop_fp_single = next_uop_fp_single; // @[rename-stage.scala:123:24, :174:28] wire r_uop_bypassed_uop_xcpt_pf_if = next_uop_xcpt_pf_if; // @[rename-stage.scala:123:24, :174:28] wire r_uop_bypassed_uop_xcpt_ae_if = next_uop_xcpt_ae_if; // @[rename-stage.scala:123:24, :174:28] wire r_uop_bypassed_uop_xcpt_ma_if = next_uop_xcpt_ma_if; // @[rename-stage.scala:123:24, :174:28] wire r_uop_bypassed_uop_bp_debug_if = next_uop_bp_debug_if; // @[rename-stage.scala:123:24, :174:28] wire r_uop_bypassed_uop_bp_xcpt_if = next_uop_bp_xcpt_if; // @[rename-stage.scala:123:24, :174:28] wire [1:0] r_uop_bypassed_uop_debug_fsrc = next_uop_debug_fsrc; // @[rename-stage.scala:123:24, :174:28] wire [1:0] r_uop_bypassed_uop_debug_tsrc = next_uop_debug_tsrc; // @[rename-stage.scala:123:24, :174:28] wire [5:0] next_uop_prs1; // @[rename-stage.scala:123:24] wire [5:0] next_uop_prs2; // @[rename-stage.scala:123:24] wire next_uop_prs1_busy; // @[rename-stage.scala:123:24] wire next_uop_prs2_busy; // @[rename-stage.scala:123:24] wire [5:0] next_uop_stale_pdst; // @[rename-stage.scala:123:24] wire _r_valid_T = ~io_dis_fire_0_0; // @[rename-stage.scala:133:29, :160:7] wire _r_valid_T_1 = r_valid & _r_valid_T; // @[rename-stage.scala:121:27, :133:{26,29}] wire _GEN = io_kill_0 | ~io_dis_ready_0; // @[rename-stage.scala:125:14, :127:20, :129:30, :160:7] assign next_uop_uopc = _GEN ? r_uop_uopc : ren1_uops_0_uopc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_inst = _GEN ? 
r_uop_inst : ren1_uops_0_inst; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_debug_inst = _GEN ? r_uop_debug_inst : ren1_uops_0_debug_inst; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_is_rvc = _GEN ? r_uop_is_rvc : ren1_uops_0_is_rvc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_debug_pc = _GEN ? r_uop_debug_pc : ren1_uops_0_debug_pc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_iq_type = _GEN ? r_uop_iq_type : ren1_uops_0_iq_type; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_fu_code = _GEN ? r_uop_fu_code : ren1_uops_0_fu_code; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_ctrl_br_type = _GEN ? r_uop_ctrl_br_type : 4'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_ctrl_op1_sel = _GEN ? r_uop_ctrl_op1_sel : 2'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_ctrl_op2_sel = _GEN ? r_uop_ctrl_op2_sel : 3'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_ctrl_imm_sel = _GEN ? r_uop_ctrl_imm_sel : 3'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_ctrl_op_fcn = _GEN ? r_uop_ctrl_op_fcn : 5'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_ctrl_fcn_dw = _GEN & r_uop_ctrl_fcn_dw; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_ctrl_csr_cmd = _GEN ? r_uop_ctrl_csr_cmd : 3'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_ctrl_is_load = _GEN & r_uop_ctrl_is_load; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_ctrl_is_sta = _GEN & r_uop_ctrl_is_sta; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_ctrl_is_std = _GEN & r_uop_ctrl_is_std; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_iw_state = _GEN ? r_uop_iw_state : 2'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_iw_p1_poisoned = _GEN & r_uop_iw_p1_poisoned; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_iw_p2_poisoned = _GEN & r_uop_iw_p2_poisoned; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_is_br = _GEN ? r_uop_is_br : ren1_uops_0_is_br; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_is_jalr = _GEN ? r_uop_is_jalr : ren1_uops_0_is_jalr; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_is_jal = _GEN ? r_uop_is_jal : ren1_uops_0_is_jal; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_is_sfb = _GEN ? r_uop_is_sfb : ren1_uops_0_is_sfb; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_br_mask = _GEN ? r_uop_br_mask : ren1_uops_0_br_mask; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_br_tag = _GEN ? r_uop_br_tag : ren1_uops_0_br_tag; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_ftq_idx = _GEN ? 
r_uop_ftq_idx : ren1_uops_0_ftq_idx; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_edge_inst = _GEN ? r_uop_edge_inst : ren1_uops_0_edge_inst; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_pc_lob = _GEN ? r_uop_pc_lob : ren1_uops_0_pc_lob; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_taken = _GEN ? r_uop_taken : ren1_uops_0_taken; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_imm_packed = _GEN ? r_uop_imm_packed : ren1_uops_0_imm_packed; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_csr_addr = _GEN ? r_uop_csr_addr : 12'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_rob_idx = _GEN ? r_uop_rob_idx : 5'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_ldq_idx = _GEN ? r_uop_ldq_idx : 3'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_stq_idx = _GEN ? r_uop_stq_idx : 3'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_rxq_idx = _GEN ? r_uop_rxq_idx : 2'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_pdst = _GEN ? r_uop_pdst : 6'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_prs1 = _GEN ? r_uop_prs1 : ren1_uops_0_prs1; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_prs2 = _GEN ? r_uop_prs2 : ren1_uops_0_prs2; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_ppred = _GEN ? r_uop_ppred : 4'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_prs1_busy = _GEN & r_uop_prs1_busy; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_prs2_busy = _GEN & r_uop_prs2_busy; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_ppred_busy = _GEN & r_uop_ppred_busy; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_stale_pdst = _GEN ? r_uop_stale_pdst : ren1_uops_0_stale_pdst; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_exception = _GEN ? r_uop_exception : ren1_uops_0_exception; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_exc_cause = _GEN ? r_uop_exc_cause : ren1_uops_0_exc_cause; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_bypassable = _GEN ? r_uop_bypassable : ren1_uops_0_bypassable; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_mem_cmd = _GEN ? r_uop_mem_cmd : ren1_uops_0_mem_cmd; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_mem_size = _GEN ? r_uop_mem_size : ren1_uops_0_mem_size; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_mem_signed = _GEN ? r_uop_mem_signed : ren1_uops_0_mem_signed; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_is_fence = _GEN ? r_uop_is_fence : ren1_uops_0_is_fence; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_is_fencei = _GEN ? 
r_uop_is_fencei : ren1_uops_0_is_fencei; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_is_amo = _GEN ? r_uop_is_amo : ren1_uops_0_is_amo; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_uses_ldq = _GEN ? r_uop_uses_ldq : ren1_uops_0_uses_ldq; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_uses_stq = _GEN ? r_uop_uses_stq : ren1_uops_0_uses_stq; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_is_sys_pc2epc = _GEN ? r_uop_is_sys_pc2epc : ren1_uops_0_is_sys_pc2epc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_is_unique = _GEN ? r_uop_is_unique : ren1_uops_0_is_unique; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_flush_on_commit = _GEN ? r_uop_flush_on_commit : ren1_uops_0_flush_on_commit; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_ldst_is_rs1 = _GEN & r_uop_ldst_is_rs1; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_ldst = _GEN ? r_uop_ldst : ren1_uops_0_ldst; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_lrs1 = _GEN ? r_uop_lrs1 : ren1_uops_0_lrs1; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_lrs2 = _GEN ? r_uop_lrs2 : ren1_uops_0_lrs2; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_lrs3 = _GEN ? r_uop_lrs3 : ren1_uops_0_lrs3; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_ldst_val = _GEN ? r_uop_ldst_val : ren1_uops_0_ldst_val; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_dst_rtype = _GEN ? r_uop_dst_rtype : ren1_uops_0_dst_rtype; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_lrs1_rtype = _GEN ? r_uop_lrs1_rtype : ren1_uops_0_lrs1_rtype; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_lrs2_rtype = _GEN ? r_uop_lrs2_rtype : ren1_uops_0_lrs2_rtype; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_frs3_en = _GEN ? r_uop_frs3_en : ren1_uops_0_frs3_en; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_fp_val = _GEN ? r_uop_fp_val : ren1_uops_0_fp_val; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_fp_single = _GEN ? r_uop_fp_single : ren1_uops_0_fp_single; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_xcpt_pf_if = _GEN ? r_uop_xcpt_pf_if : ren1_uops_0_xcpt_pf_if; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_xcpt_ae_if = _GEN ? r_uop_xcpt_ae_if : ren1_uops_0_xcpt_ae_if; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_xcpt_ma_if = _GEN & r_uop_xcpt_ma_if; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_bp_debug_if = _GEN ? r_uop_bp_debug_if : ren1_uops_0_bp_debug_if; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_bp_xcpt_if = _GEN ? 
r_uop_bp_xcpt_if : ren1_uops_0_bp_xcpt_if; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_debug_fsrc = _GEN ? r_uop_debug_fsrc : ren1_uops_0_debug_fsrc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30] assign next_uop_debug_tsrc = _GEN ? r_uop_debug_tsrc : 2'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30] wire [6:0] r_uop_newuop_uopc = r_uop_bypassed_uop_uopc; // @[util.scala:73:26] wire [31:0] r_uop_newuop_inst = r_uop_bypassed_uop_inst; // @[util.scala:73:26] wire [31:0] r_uop_newuop_debug_inst = r_uop_bypassed_uop_debug_inst; // @[util.scala:73:26] wire r_uop_newuop_is_rvc = r_uop_bypassed_uop_is_rvc; // @[util.scala:73:26] wire [39:0] r_uop_newuop_debug_pc = r_uop_bypassed_uop_debug_pc; // @[util.scala:73:26] wire [2:0] r_uop_newuop_iq_type = r_uop_bypassed_uop_iq_type; // @[util.scala:73:26] wire [9:0] r_uop_newuop_fu_code = r_uop_bypassed_uop_fu_code; // @[util.scala:73:26] wire [3:0] r_uop_newuop_ctrl_br_type = r_uop_bypassed_uop_ctrl_br_type; // @[util.scala:73:26] wire [1:0] r_uop_newuop_ctrl_op1_sel = r_uop_bypassed_uop_ctrl_op1_sel; // @[util.scala:73:26] wire [2:0] r_uop_newuop_ctrl_op2_sel = r_uop_bypassed_uop_ctrl_op2_sel; // @[util.scala:73:26] wire [2:0] r_uop_newuop_ctrl_imm_sel = r_uop_bypassed_uop_ctrl_imm_sel; // @[util.scala:73:26] wire [4:0] r_uop_newuop_ctrl_op_fcn = r_uop_bypassed_uop_ctrl_op_fcn; // @[util.scala:73:26] wire r_uop_newuop_ctrl_fcn_dw = r_uop_bypassed_uop_ctrl_fcn_dw; // @[util.scala:73:26] wire [2:0] r_uop_newuop_ctrl_csr_cmd = r_uop_bypassed_uop_ctrl_csr_cmd; // @[util.scala:73:26] wire r_uop_newuop_ctrl_is_load = r_uop_bypassed_uop_ctrl_is_load; // @[util.scala:73:26] wire r_uop_newuop_ctrl_is_sta = r_uop_bypassed_uop_ctrl_is_sta; // @[util.scala:73:26] wire r_uop_newuop_ctrl_is_std = r_uop_bypassed_uop_ctrl_is_std; // @[util.scala:73:26] wire [1:0] r_uop_newuop_iw_state = r_uop_bypassed_uop_iw_state; // @[util.scala:73:26] wire r_uop_newuop_iw_p1_poisoned = r_uop_bypassed_uop_iw_p1_poisoned; // @[util.scala:73:26] wire r_uop_newuop_iw_p2_poisoned = r_uop_bypassed_uop_iw_p2_poisoned; // @[util.scala:73:26] wire r_uop_newuop_is_br = r_uop_bypassed_uop_is_br; // @[util.scala:73:26] wire r_uop_newuop_is_jalr = r_uop_bypassed_uop_is_jalr; // @[util.scala:73:26] wire r_uop_newuop_is_jal = r_uop_bypassed_uop_is_jal; // @[util.scala:73:26] wire r_uop_newuop_is_sfb = r_uop_bypassed_uop_is_sfb; // @[util.scala:73:26] wire [2:0] r_uop_newuop_br_tag = r_uop_bypassed_uop_br_tag; // @[util.scala:73:26] wire [3:0] r_uop_newuop_ftq_idx = r_uop_bypassed_uop_ftq_idx; // @[util.scala:73:26] wire r_uop_newuop_edge_inst = r_uop_bypassed_uop_edge_inst; // @[util.scala:73:26] wire [5:0] r_uop_newuop_pc_lob = r_uop_bypassed_uop_pc_lob; // @[util.scala:73:26] wire r_uop_newuop_taken = r_uop_bypassed_uop_taken; // @[util.scala:73:26] wire [19:0] r_uop_newuop_imm_packed = r_uop_bypassed_uop_imm_packed; // @[util.scala:73:26] wire [11:0] r_uop_newuop_csr_addr = r_uop_bypassed_uop_csr_addr; // @[util.scala:73:26] wire [4:0] r_uop_newuop_rob_idx = r_uop_bypassed_uop_rob_idx; // @[util.scala:73:26] wire [2:0] r_uop_newuop_ldq_idx = r_uop_bypassed_uop_ldq_idx; // @[util.scala:73:26] wire [2:0] r_uop_newuop_stq_idx = r_uop_bypassed_uop_stq_idx; // @[util.scala:73:26] wire [1:0] r_uop_newuop_rxq_idx = r_uop_bypassed_uop_rxq_idx; // @[util.scala:73:26] wire [5:0] r_uop_newuop_pdst = r_uop_bypassed_uop_pdst; // @[util.scala:73:26] wire [5:0] r_uop_newuop_prs1 = r_uop_bypassed_uop_prs1; 
// @[util.scala:73:26] wire [5:0] r_uop_newuop_prs2 = r_uop_bypassed_uop_prs2; // @[util.scala:73:26] wire _r_uop_bypassed_uop_prs1_busy_T; // @[rename-stage.scala:199:45] wire [3:0] r_uop_newuop_ppred = r_uop_bypassed_uop_ppred; // @[util.scala:73:26] wire _r_uop_bypassed_uop_prs2_busy_T; // @[rename-stage.scala:200:45] wire r_uop_newuop_prs1_busy = r_uop_bypassed_uop_prs1_busy; // @[util.scala:73:26] wire r_uop_newuop_prs2_busy = r_uop_bypassed_uop_prs2_busy; // @[util.scala:73:26] wire r_uop_newuop_ppred_busy = r_uop_bypassed_uop_ppred_busy; // @[util.scala:73:26] wire [5:0] r_uop_newuop_stale_pdst = r_uop_bypassed_uop_stale_pdst; // @[util.scala:73:26] wire r_uop_newuop_exception = r_uop_bypassed_uop_exception; // @[util.scala:73:26] wire [63:0] r_uop_newuop_exc_cause = r_uop_bypassed_uop_exc_cause; // @[util.scala:73:26] wire r_uop_newuop_bypassable = r_uop_bypassed_uop_bypassable; // @[util.scala:73:26] wire [4:0] r_uop_newuop_mem_cmd = r_uop_bypassed_uop_mem_cmd; // @[util.scala:73:26] wire [1:0] r_uop_newuop_mem_size = r_uop_bypassed_uop_mem_size; // @[util.scala:73:26] wire r_uop_newuop_mem_signed = r_uop_bypassed_uop_mem_signed; // @[util.scala:73:26] wire r_uop_newuop_is_fence = r_uop_bypassed_uop_is_fence; // @[util.scala:73:26] wire r_uop_newuop_is_fencei = r_uop_bypassed_uop_is_fencei; // @[util.scala:73:26] wire r_uop_newuop_is_amo = r_uop_bypassed_uop_is_amo; // @[util.scala:73:26] wire r_uop_newuop_uses_ldq = r_uop_bypassed_uop_uses_ldq; // @[util.scala:73:26] wire r_uop_newuop_uses_stq = r_uop_bypassed_uop_uses_stq; // @[util.scala:73:26] wire r_uop_newuop_is_sys_pc2epc = r_uop_bypassed_uop_is_sys_pc2epc; // @[util.scala:73:26] wire r_uop_newuop_is_unique = r_uop_bypassed_uop_is_unique; // @[util.scala:73:26] wire r_uop_newuop_flush_on_commit = r_uop_bypassed_uop_flush_on_commit; // @[util.scala:73:26] wire r_uop_newuop_ldst_is_rs1 = r_uop_bypassed_uop_ldst_is_rs1; // @[util.scala:73:26] wire [5:0] r_uop_newuop_ldst = r_uop_bypassed_uop_ldst; // @[util.scala:73:26] wire [5:0] r_uop_newuop_lrs1 = r_uop_bypassed_uop_lrs1; // @[util.scala:73:26] wire [5:0] r_uop_newuop_lrs2 = r_uop_bypassed_uop_lrs2; // @[util.scala:73:26] wire [5:0] r_uop_newuop_lrs3 = r_uop_bypassed_uop_lrs3; // @[util.scala:73:26] wire r_uop_newuop_ldst_val = r_uop_bypassed_uop_ldst_val; // @[util.scala:73:26] wire [1:0] r_uop_newuop_dst_rtype = r_uop_bypassed_uop_dst_rtype; // @[util.scala:73:26] wire [1:0] r_uop_newuop_lrs1_rtype = r_uop_bypassed_uop_lrs1_rtype; // @[util.scala:73:26] wire [1:0] r_uop_newuop_lrs2_rtype = r_uop_bypassed_uop_lrs2_rtype; // @[util.scala:73:26] wire r_uop_newuop_frs3_en = r_uop_bypassed_uop_frs3_en; // @[util.scala:73:26] wire r_uop_newuop_fp_val = r_uop_bypassed_uop_fp_val; // @[util.scala:73:26] wire r_uop_newuop_fp_single = r_uop_bypassed_uop_fp_single; // @[util.scala:73:26] wire r_uop_newuop_xcpt_pf_if = r_uop_bypassed_uop_xcpt_pf_if; // @[util.scala:73:26] wire r_uop_newuop_xcpt_ae_if = r_uop_bypassed_uop_xcpt_ae_if; // @[util.scala:73:26] wire r_uop_newuop_xcpt_ma_if = r_uop_bypassed_uop_xcpt_ma_if; // @[util.scala:73:26] wire r_uop_newuop_bp_debug_if = r_uop_bypassed_uop_bp_debug_if; // @[util.scala:73:26] wire r_uop_newuop_bp_xcpt_if = r_uop_bypassed_uop_bp_xcpt_if; // @[util.scala:73:26] wire [1:0] r_uop_newuop_debug_fsrc = r_uop_bypassed_uop_debug_fsrc; // @[util.scala:73:26] wire [1:0] r_uop_newuop_debug_tsrc = r_uop_bypassed_uop_debug_tsrc; // @[util.scala:73:26] wire _r_uop_bypass_hits_rs1_T = ren2_uops_0_ldst == next_uop_lrs1; // @[rename-stage.scala:108:29, 
:123:24, :177:87] wire r_uop_bypass_hits_rs1_0 = ren2_alloc_reqs_0 & _r_uop_bypass_hits_rs1_T; // @[rename-stage.scala:109:29, :177:{77,87}] wire r_uop_bypass_sel_rs1_enc = r_uop_bypass_hits_rs1_0; // @[Mux.scala:50:70] wire _r_uop_bypass_hits_rs2_T = ren2_uops_0_ldst == next_uop_lrs2; // @[rename-stage.scala:108:29, :123:24, :178:87] wire r_uop_bypass_hits_rs2_0 = ren2_alloc_reqs_0 & _r_uop_bypass_hits_rs2_T; // @[rename-stage.scala:109:29, :178:{77,87}] wire r_uop_bypass_sel_rs2_enc = r_uop_bypass_hits_rs2_0; // @[Mux.scala:50:70] wire _r_uop_bypass_hits_rs3_T = ren2_uops_0_ldst == next_uop_lrs3; // @[rename-stage.scala:108:29, :123:24, :179:87] wire r_uop_bypass_hits_rs3_0 = ren2_alloc_reqs_0 & _r_uop_bypass_hits_rs3_T; // @[rename-stage.scala:109:29, :179:{77,87}] wire r_uop_bypass_sel_rs3_enc = r_uop_bypass_hits_rs3_0; // @[Mux.scala:50:70] wire _r_uop_bypassed_uop_prs3_busy_T = r_uop_bypass_hits_rs3_0; // @[rename-stage.scala:179:77, :201:45] wire _r_uop_bypass_hits_dst_T = ren2_uops_0_ldst == next_uop_ldst; // @[rename-stage.scala:108:29, :123:24, :180:87] wire r_uop_bypass_hits_dst_0 = ren2_alloc_reqs_0 & _r_uop_bypass_hits_dst_T; // @[rename-stage.scala:109:29, :180:{77,87}] wire r_uop_bypass_sel_dst_enc = r_uop_bypass_hits_dst_0; // @[Mux.scala:50:70] wire r_uop_bypass_sel_rs1_0 = r_uop_bypass_sel_rs1_enc; // @[OneHot.scala:83:30] wire r_uop_bypass_sel_rs2_0 = r_uop_bypass_sel_rs2_enc; // @[OneHot.scala:83:30] wire r_uop_bypass_sel_rs3_0 = r_uop_bypass_sel_rs3_enc; // @[OneHot.scala:83:30] wire r_uop_bypass_sel_dst_0 = r_uop_bypass_sel_dst_enc; // @[OneHot.scala:83:30] assign r_uop_bypassed_uop_prs1 = r_uop_bypass_hits_rs1_0 ? ren2_uops_0_pdst : next_uop_prs1; // @[rename-stage.scala:108:29, :123:24, :174:28, :175:18, :177:77, :194:{26,52}] assign r_uop_bypassed_uop_prs2 = r_uop_bypass_hits_rs2_0 ? ren2_uops_0_pdst : next_uop_prs2; // @[rename-stage.scala:108:29, :123:24, :174:28, :175:18, :178:77, :195:{26,52}] assign r_uop_bypassed_uop_stale_pdst = r_uop_bypass_hits_dst_0 ? 
ren2_uops_0_pdst : next_uop_stale_pdst; // @[rename-stage.scala:108:29, :123:24, :174:28, :175:18, :180:77, :197:{26,52}] assign _r_uop_bypassed_uop_prs1_busy_T = next_uop_prs1_busy | r_uop_bypass_hits_rs1_0; // @[rename-stage.scala:123:24, :177:77, :199:45] assign r_uop_bypassed_uop_prs1_busy = _r_uop_bypassed_uop_prs1_busy_T; // @[rename-stage.scala:174:28, :199:45] assign _r_uop_bypassed_uop_prs2_busy_T = next_uop_prs2_busy | r_uop_bypass_hits_rs2_0; // @[rename-stage.scala:123:24, :178:77, :200:45] assign r_uop_bypassed_uop_prs2_busy = _r_uop_bypassed_uop_prs2_busy_T; // @[rename-stage.scala:174:28, :200:45] wire [7:0] _r_uop_newuop_br_mask_T_1; // @[util.scala:74:35] wire [7:0] r_uop_newuop_br_mask; // @[util.scala:73:26] wire [7:0] _r_uop_newuop_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:74:37] assign _r_uop_newuop_br_mask_T_1 = r_uop_bypassed_uop_br_mask & _r_uop_newuop_br_mask_T; // @[util.scala:74:{35,37}] assign r_uop_newuop_br_mask = _r_uop_newuop_br_mask_T_1; // @[util.scala:73:26, :74:35] wire _ren2_br_tags_0_valid_T_3; // @[rename-stage.scala:241:43] wire ren2_br_tags_0_valid; // @[rename-stage.scala:233:29] wire _com_valids_0_T_2; // @[rename-stage.scala:243:92] wire com_valids_0; // @[rename-stage.scala:236:29] wire _rbk_valids_0_T_2; // @[rename-stage.scala:244:92] wire rbk_valids_0; // @[rename-stage.scala:237:29] wire _GEN_0 = ren2_uops_0_dst_rtype == 2'h0; // @[rename-stage.scala:108:29, :240:78] wire _ren2_alloc_reqs_0_T; // @[rename-stage.scala:240:78] assign _ren2_alloc_reqs_0_T = _GEN_0; // @[rename-stage.scala:240:78] wire _io_ren_stalls_0_T; // @[rename-stage.scala:339:49] assign _io_ren_stalls_0_T = _GEN_0; // @[rename-stage.scala:240:78, :339:49] wire _ren2_alloc_reqs_0_T_1 = ren2_uops_0_ldst_val & _ren2_alloc_reqs_0_T; // @[rename-stage.scala:108:29, :240:{52,78}] assign _ren2_alloc_reqs_0_T_2 = _ren2_alloc_reqs_0_T_1 & io_dis_fire_0_0; // @[rename-stage.scala:160:7, :240:{52,88}] assign ren2_alloc_reqs_0 = _ren2_alloc_reqs_0_T_2; // @[rename-stage.scala:109:29, :240:88] wire _ren2_br_tags_0_valid_T = ~ren2_uops_0_is_sfb; // @[rename-stage.scala:108:29] wire _ren2_br_tags_0_valid_T_1 = ren2_uops_0_is_br & _ren2_br_tags_0_valid_T; // @[rename-stage.scala:108:29] wire _ren2_br_tags_0_valid_T_2 = _ren2_br_tags_0_valid_T_1 | ren2_uops_0_is_jalr; // @[rename-stage.scala:108:29] assign _ren2_br_tags_0_valid_T_3 = io_dis_fire_0_0 & _ren2_br_tags_0_valid_T_2; // @[rename-stage.scala:160:7, :241:43] assign ren2_br_tags_0_valid = _ren2_br_tags_0_valid_T_3; // @[rename-stage.scala:233:29, :241:43] wire _GEN_1 = io_com_uops_0_dst_rtype_0 == 2'h0; // @[rename-stage.scala:160:7, :243:82] wire _com_valids_0_T; // @[rename-stage.scala:243:82] assign _com_valids_0_T = _GEN_1; // @[rename-stage.scala:243:82] wire _rbk_valids_0_T; // @[rename-stage.scala:244:82] assign _rbk_valids_0_T = _GEN_1; // @[rename-stage.scala:243:82, :244:82] wire _com_valids_0_T_1 = io_com_uops_0_ldst_val_0 & _com_valids_0_T; // @[rename-stage.scala:160:7, :243:{54,82}] assign _com_valids_0_T_2 = _com_valids_0_T_1 & io_com_valids_0_0; // @[rename-stage.scala:160:7, :243:{54,92}] assign com_valids_0 = _com_valids_0_T_2; // @[rename-stage.scala:236:29, :243:92] wire _rbk_valids_0_T_1 = io_com_uops_0_ldst_val_0 & _rbk_valids_0_T; // @[rename-stage.scala:160:7, :244:{54,82}] assign _rbk_valids_0_T_2 = _rbk_valids_0_T_1 & io_rbk_valids_0_0; // @[rename-stage.scala:160:7, :244:{54,92}] assign rbk_valids_0 = _rbk_valids_0_T_2; // @[rename-stage.scala:237:29, :244:92] wire [5:0] 
_remap_reqs_0_ldst_T; // @[rename-stage.scala:262:30] wire [5:0] _remap_reqs_0_pdst_T; // @[rename-stage.scala:263:30] wire _remap_reqs_0_valid_T; // @[rename-stage.scala:266:38] wire [5:0] remap_reqs_0_ldst; // @[rename-stage.scala:253:24] wire [5:0] remap_reqs_0_pdst; // @[rename-stage.scala:253:24] wire remap_reqs_0_valid; // @[rename-stage.scala:253:24] assign _remap_reqs_0_ldst_T = io_rollback_0 ? io_com_uops_0_ldst_0 : ren2_uops_0_ldst; // @[rename-stage.scala:108:29, :160:7, :262:30] assign remap_reqs_0_ldst = _remap_reqs_0_ldst_T; // @[rename-stage.scala:253:24, :262:30] assign _remap_reqs_0_pdst_T = io_rollback_0 ? io_com_uops_0_stale_pdst_0 : ren2_uops_0_pdst; // @[rename-stage.scala:108:29, :160:7, :263:30] assign remap_reqs_0_pdst = _remap_reqs_0_pdst_T; // @[rename-stage.scala:253:24, :263:30] assign _remap_reqs_0_valid_T = ren2_alloc_reqs_0 | rbk_valids_0; // @[rename-stage.scala:109:29, :237:29, :266:38] assign remap_reqs_0_valid = _remap_reqs_0_valid_T; // @[rename-stage.scala:253:24, :266:38] wire _freelist_io_dealloc_pregs_0_valid_T = com_valids_0 | rbk_valids_0; // @[rename-stage.scala:236:29, :237:29, :293:37] wire [5:0] _freelist_io_dealloc_pregs_0_bits_T = io_rollback_0 ? io_com_uops_0_pdst_0 : io_com_uops_0_stale_pdst_0; // @[rename-stage.scala:160:7, :295:33] wire _ren2_uops_0_pdst_T = |ren2_uops_0_ldst; // @[rename-stage.scala:108:29, :306:30] wire _ren2_uops_0_pdst_T_1 = _ren2_uops_0_pdst_T; // @[rename-stage.scala:306:{30,38}] assign _ren2_uops_0_pdst_T_2 = _ren2_uops_0_pdst_T_1 ? _freelist_io_alloc_pregs_0_bits : 6'h0; // @[rename-stage.scala:220:24, :306:{20,38}] assign ren2_uops_0_pdst = _ren2_uops_0_pdst_T_2; // @[rename-stage.scala:108:29, :306:20] wire _ren2_uops_0_prs1_busy_T = ren2_uops_0_lrs1_rtype == 2'h0; // @[rename-stage.scala:108:29, :323:37] assign _ren2_uops_0_prs1_busy_T_1 = _ren2_uops_0_prs1_busy_T & _busytable_io_busy_resps_0_prs1_busy; // @[rename-stage.scala:224:25, :323:{37,47}] assign ren2_uops_0_prs1_busy = _ren2_uops_0_prs1_busy_T_1; // @[rename-stage.scala:108:29, :323:47] wire _ren2_uops_0_prs2_busy_T = ren2_uops_0_lrs2_rtype == 2'h0; // @[rename-stage.scala:108:29, :324:37] assign _ren2_uops_0_prs2_busy_T_1 = _ren2_uops_0_prs2_busy_T & _busytable_io_busy_resps_0_prs2_busy; // @[rename-stage.scala:224:25, :324:{37,47}] assign ren2_uops_0_prs2_busy = _ren2_uops_0_prs2_busy_T_1; // @[rename-stage.scala:108:29, :324:47]
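The long run of assigns above implements two ideas from the rename stage: a source operand is busy only when it is an integer operand and the busy table still reports its physical register as unwritten (the `_ren2_uops_0_prs1_busy_T_1` nets), and a uop whose logical source matches a destination being allocated in the same window must take that newly allocated `pdst` and be marked busy (the `r_uop_bypass_hits_rs1_0` / `r_uop_bypassed_uop_prs1` nets). The following is a hedged, self-contained Chisel sketch of those two ideas for a 1-wide rename; all port and signal names here are illustrative and simplified, not the verbatim BOOM `rename-stage.scala` source.

```scala
import chisel3._

// Minimal sketch (assumptions: 1-wide rename, RT_FIX encoded such that the
// integer-type compare below matches the `== 2'h0` checks in the Verilog).
class RenameBusyBypassSketch(pregSz: Int = 6, lregSz: Int = 6) extends Module {
  val io = IO(new Bundle {
    val lrs1_is_int = Input(Bool())            // lrs1_rtype === RT_FIX in the real design
    val busy_resp   = Input(Bool())            // busy-table lookup for the mapped prs1
    val mapped_prs1 = Input(UInt(pregSz.W))    // map table's answer for lrs1
    val lrs1        = Input(UInt(lregSz.W))
    val older_alloc = Input(Bool())            // an older uop allocates a pdst this cycle
    val older_ldst  = Input(UInt(lregSz.W))    // that older uop's logical destination
    val older_pdst  = Input(UInt(pregSz.W))    // that older uop's freshly allocated preg
    val prs1        = Output(UInt(pregSz.W))
    val prs1_busy   = Output(Bool())
  })

  // Busy bit from the busy table, qualified by operand type
  // (corresponds to the prs1_busy assigns tagged rename-stage.scala:323).
  val base_busy = io.lrs1_is_int && io.busy_resp

  // Same-group bypass: if the older uop writes the logical register this uop
  // reads, forward its pdst and treat the operand as busy
  // (corresponds to the bypass_hits_rs1 / bypassed_uop_prs1 nets).
  val hit = io.older_alloc && io.older_ldst === io.lrs1
  io.prs1      := Mux(hit, io.older_pdst, io.mapped_prs1)
  io.prs1_busy := base_busy || hit
}
```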
Generate the Verilog code corresponding to the following Chisel files. File Decode.scala: // See LICENSE.Berkeley for license details. package freechips.rocketchip.rocket import chisel3._ import chisel3.util.BitPat import chisel3.util.experimental.decode._ object DecodeLogic { // TODO This should be a method on BitPat private def hasDontCare(bp: BitPat): Boolean = bp.mask.bitCount != bp.width // Pads BitPats that are safe to pad (no don't cares), errors otherwise private def padBP(bp: BitPat, width: Int): BitPat = { if (bp.width == width) bp else { require(!hasDontCare(bp), s"Cannot pad '$bp' to '$width' bits because it has don't cares") val diff = width - bp.width require(diff > 0, s"Cannot pad '$bp' to '$width' because it is already '${bp.width}' bits wide!") BitPat(0.U(diff.W)) ## bp } } def apply(addr: UInt, default: BitPat, mapping: Iterable[(BitPat, BitPat)]): UInt = chisel3.util.experimental.decode.decoder(QMCMinimizer, addr, TruthTable(mapping, default)) def apply(addr: UInt, default: Seq[BitPat], mappingIn: Iterable[(BitPat, Seq[BitPat])]): Seq[UInt] = { val nElts = default.size require(mappingIn.forall(_._2.size == nElts), s"All Seq[BitPat] must be of the same length, got $nElts vs. ${mappingIn.find(_._2.size != nElts).get}" ) val elementsGrouped = mappingIn.map(_._2).transpose val elementWidths = elementsGrouped.zip(default).map { case (elts, default) => (default :: elts.toList).map(_.getWidth).max } val resultWidth = elementWidths.sum val elementIndices = elementWidths.scan(resultWidth - 1) { case (l, r) => l - r } // All BitPats that correspond to a given element in the result must have the same width in the // chisel3 decoder. We will zero pad any BitPats that are too small so long as they dont have // any don't cares. If there are don't cares, it is an error and the user needs to pad the // BitPat themselves val defaultsPadded = default.zip(elementWidths).map { case (bp, w) => padBP(bp, w) } val mappingInPadded = mappingIn.map { case (in, elts) => in -> elts.zip(elementWidths).map { case (bp, w) => padBP(bp, w) } } val decoded = apply(addr, defaultsPadded.reduce(_ ## _), mappingInPadded.map { case (in, out) => (in, out.reduce(_ ## _)) }) elementIndices.zip(elementIndices.tail).map { case (msb, lsb) => decoded(msb, lsb + 1) }.toList } def apply(addr: UInt, default: Seq[BitPat], mappingIn: List[(UInt, Seq[BitPat])]): Seq[UInt] = apply(addr, default, mappingIn.map(m => (BitPat(m._1), m._2)).asInstanceOf[Iterable[(BitPat, Seq[BitPat])]]) def apply(addr: UInt, trues: Iterable[UInt], falses: Iterable[UInt]): Bool = apply(addr, BitPat.dontCare(1), trues.map(BitPat(_) -> BitPat("b1")) ++ falses.map(BitPat(_) -> BitPat("b0"))).asBool } File fpu.scala: //****************************************************************************** // Copyright (c) 2015 - 2018, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. 
//------------------------------------------------------------------------------ package boom.v3.exu import chisel3._ import chisel3.util._ import chisel3.experimental.dataview._ import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.tile.FPConstants._ import freechips.rocketchip.tile.{FPUCtrlSigs, HasFPUParameters} import freechips.rocketchip.tile import freechips.rocketchip.rocket import freechips.rocketchip.util.uintToBitPat import boom.v3.common._ import boom.v3.util.{ImmGenRm, ImmGenTyp} /** * FP Decoder for the FPU * * TODO get rid of this decoder and move into the Decode stage? Or the RRd stage? * most of these signals are already created, just need to be translated * to the Rocket FPU-speak */ class UOPCodeFPUDecoder(implicit p: Parameters) extends BoomModule with HasFPUParameters { val io = IO(new Bundle { val uopc = Input(Bits(UOPC_SZ.W)) val sigs = Output(new FPUCtrlSigs()) }) // TODO change N,Y,X to BitPat("b1"), BitPat("b0"), and BitPat("b?") val N = false.B val Y = true.B val X = false.B val default: List[BitPat] = List(X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X) val f_table: Array[(BitPat, List[BitPat])] = // Note: not all of these signals are used or necessary, but we're // constrained by the need to fit the rocket.FPU units' ctrl signals. // swap12 fma // | swap32 | div // | | typeTagIn | | sqrt // ldst | | | typeTagOut | | wflags // | wen | | | | from_int | | | // | | ren1 | | | | | to_int | | | // | | | ren2 | | | | | | fastpipe | // | | | | ren3 | | | | | | | | | | // | | | | | | | | | | | | | | | | Array( BitPat(uopFCLASS_S) -> List(X,X,Y,N,N, N,X,S,S,N,Y,N, N,N,N,N), BitPat(uopFMV_W_X) -> List(X,X,N,N,N, X,X,S,D,Y,N,N, N,N,N,N), BitPat(uopFMV_X_W) -> List(X,X,Y,N,N, N,X,D,S,N,Y,N, N,N,N,N), BitPat(uopFCVT_S_X) -> List(X,X,N,N,N, X,X,S,S,Y,N,N, N,N,N,Y), BitPat(uopFCVT_X_S) -> List(X,X,Y,N,N, N,X,S,S,N,Y,N, N,N,N,Y), BitPat(uopCMPR_S) -> List(X,X,Y,Y,N, N,N,S,S,N,Y,N, N,N,N,Y), BitPat(uopFSGNJ_S) -> List(X,X,Y,Y,N, N,N,S,S,N,N,Y, N,N,N,N), BitPat(uopFMINMAX_S)-> List(X,X,Y,Y,N, N,N,S,S,N,N,Y, N,N,N,Y), BitPat(uopFADD_S) -> List(X,X,Y,Y,N, N,Y,S,S,N,N,N, Y,N,N,Y), BitPat(uopFSUB_S) -> List(X,X,Y,Y,N, N,Y,S,S,N,N,N, Y,N,N,Y), BitPat(uopFMUL_S) -> List(X,X,Y,Y,N, N,N,S,S,N,N,N, Y,N,N,Y), BitPat(uopFMADD_S) -> List(X,X,Y,Y,Y, N,N,S,S,N,N,N, Y,N,N,Y), BitPat(uopFMSUB_S) -> List(X,X,Y,Y,Y, N,N,S,S,N,N,N, Y,N,N,Y), BitPat(uopFNMADD_S) -> List(X,X,Y,Y,Y, N,N,S,S,N,N,N, Y,N,N,Y), BitPat(uopFNMSUB_S) -> List(X,X,Y,Y,Y, N,N,S,S,N,N,N, Y,N,N,Y) ) val d_table: Array[(BitPat, List[BitPat])] = Array( BitPat(uopFCLASS_D) -> List(X,X,Y,N,N, N,X,D,D,N,Y,N, N,N,N,N), BitPat(uopFMV_D_X) -> List(X,X,N,N,N, X,X,D,D,Y,N,N, N,N,N,N), BitPat(uopFMV_X_D) -> List(X,X,Y,N,N, N,X,D,D,N,Y,N, N,N,N,N), BitPat(uopFCVT_S_D) -> List(X,X,Y,N,N, N,X,D,S,N,N,Y, N,N,N,Y), BitPat(uopFCVT_D_S) -> List(X,X,Y,N,N, N,X,S,D,N,N,Y, N,N,N,Y), BitPat(uopFCVT_D_X) -> List(X,X,N,N,N, X,X,D,D,Y,N,N, N,N,N,Y), BitPat(uopFCVT_X_D) -> List(X,X,Y,N,N, N,X,D,D,N,Y,N, N,N,N,Y), BitPat(uopCMPR_D) -> List(X,X,Y,Y,N, N,N,D,D,N,Y,N, N,N,N,Y), BitPat(uopFSGNJ_D) -> List(X,X,Y,Y,N, N,N,D,D,N,N,Y, N,N,N,N), BitPat(uopFMINMAX_D)-> List(X,X,Y,Y,N, N,N,D,D,N,N,Y, N,N,N,Y), BitPat(uopFADD_D) -> List(X,X,Y,Y,N, N,Y,D,D,N,N,N, Y,N,N,Y), BitPat(uopFSUB_D) -> List(X,X,Y,Y,N, N,Y,D,D,N,N,N, Y,N,N,Y), BitPat(uopFMUL_D) -> List(X,X,Y,Y,N, N,N,D,D,N,N,N, Y,N,N,Y), BitPat(uopFMADD_D) -> List(X,X,Y,Y,Y, N,N,D,D,N,N,N, Y,N,N,Y), BitPat(uopFMSUB_D) -> List(X,X,Y,Y,Y, N,N,D,D,N,N,N, Y,N,N,Y), BitPat(uopFNMADD_D) -> List(X,X,Y,Y,Y, 
N,N,D,D,N,N,N, Y,N,N,Y), BitPat(uopFNMSUB_D) -> List(X,X,Y,Y,Y, N,N,D,D,N,N,N, Y,N,N,Y) ) // val insns = fLen match { // case 32 => f_table // case 64 => f_table ++ d_table // } val insns = f_table ++ d_table val decoder = rocket.DecodeLogic(io.uopc, default, insns) val s = io.sigs val sigs = Seq(s.ldst, s.wen, s.ren1, s.ren2, s.ren3, s.swap12, s.swap23, s.typeTagIn, s.typeTagOut, s.fromint, s.toint, s.fastpipe, s.fma, s.div, s.sqrt, s.wflags) sigs zip decoder map {case(s,d) => s := d} s.vec := false.B } /** * FP fused multiple add decoder for the FPU */ class FMADecoder extends Module { val io = IO(new Bundle { val uopc = Input(UInt(UOPC_SZ.W)) val cmd = Output(UInt(2.W)) }) val default: List[BitPat] = List(BitPat("b??")) val table: Array[(BitPat, List[BitPat])] = Array( BitPat(uopFADD_S) -> List(BitPat("b00")), BitPat(uopFSUB_S) -> List(BitPat("b01")), BitPat(uopFMUL_S) -> List(BitPat("b00")), BitPat(uopFMADD_S) -> List(BitPat("b00")), BitPat(uopFMSUB_S) -> List(BitPat("b01")), BitPat(uopFNMADD_S) -> List(BitPat("b11")), BitPat(uopFNMSUB_S) -> List(BitPat("b10")), BitPat(uopFADD_D) -> List(BitPat("b00")), BitPat(uopFSUB_D) -> List(BitPat("b01")), BitPat(uopFMUL_D) -> List(BitPat("b00")), BitPat(uopFMADD_D) -> List(BitPat("b00")), BitPat(uopFMSUB_D) -> List(BitPat("b01")), BitPat(uopFNMADD_D) -> List(BitPat("b11")), BitPat(uopFNMSUB_D) -> List(BitPat("b10")) ) val decoder = rocket.DecodeLogic(io.uopc, default, table) val (cmd: UInt) :: Nil = decoder io.cmd := cmd } /** * Bundle representing data to be sent to the FPU */ class FpuReq()(implicit p: Parameters) extends BoomBundle { val uop = new MicroOp() val rs1_data = Bits(65.W) val rs2_data = Bits(65.W) val rs3_data = Bits(65.W) val fcsr_rm = Bits(tile.FPConstants.RM_SZ.W) } /** * FPU unit that wraps the RocketChip FPU units (which in turn wrap hardfloat) */ class FPU(implicit p: Parameters) extends BoomModule with tile.HasFPUParameters { val io = IO(new Bundle { val req = Flipped(new ValidIO(new FpuReq)) val resp = new ValidIO(new ExeUnitResp(65)) }) io.resp.bits := DontCare // all FP units are padded out to the same latency for easy scheduling of the write port val fpu_latency = dfmaLatency val io_req = io.req.bits val fp_decoder = Module(new UOPCodeFPUDecoder) fp_decoder.io.uopc := io_req.uop.uopc val fp_ctrl = fp_decoder.io.sigs val fp_rm = Mux(ImmGenRm(io_req.uop.imm_packed) === 7.U, io_req.fcsr_rm, ImmGenRm(io_req.uop.imm_packed)) def fuInput(minT: Option[tile.FType]): tile.FPInput = { val req = Wire(new tile.FPInput) val tag = fp_ctrl.typeTagIn req.viewAsSupertype(new tile.FPUCtrlSigs) := fp_ctrl req.rm := fp_rm req.in1 := unbox(io_req.rs1_data, tag, minT) req.in2 := unbox(io_req.rs2_data, tag, minT) req.in3 := unbox(io_req.rs3_data, tag, minT) when (fp_ctrl.swap23) { req.in3 := req.in2 } req.typ := ImmGenTyp(io_req.uop.imm_packed) req.fmt := Mux(tag === S, 0.U, 1.U) // TODO support Zfh and avoid special-case below when (io_req.uop.uopc === uopFMV_X_W) { req.fmt := 0.U } val fma_decoder = Module(new FMADecoder) fma_decoder.io.uopc := io_req.uop.uopc req.fmaCmd := fma_decoder.io.cmd // ex_reg_inst(3,2) | (!fp_ctrl.ren3 && ex_reg_inst(27)) req } val dfma = Module(new tile.FPUFMAPipe(latency = fpu_latency, t = tile.FType.D)) dfma.io.in.valid := io.req.valid && fp_ctrl.fma && (fp_ctrl.typeTagOut === D) dfma.io.in.bits := fuInput(Some(dfma.t)) val sfma = Module(new tile.FPUFMAPipe(latency = fpu_latency, t = tile.FType.S)) sfma.io.in.valid := io.req.valid && fp_ctrl.fma && (fp_ctrl.typeTagOut === S) sfma.io.in.bits := 
fuInput(Some(sfma.t)) val fpiu = Module(new tile.FPToInt) fpiu.io.in.valid := io.req.valid && (fp_ctrl.toint || (fp_ctrl.fastpipe && fp_ctrl.wflags)) fpiu.io.in.bits := fuInput(None) val fpiu_out = Pipe(RegNext(fpiu.io.in.valid && !fp_ctrl.fastpipe), fpiu.io.out.bits, fpu_latency-1) val fpiu_result = Wire(new tile.FPResult) fpiu_result.data := fpiu_out.bits.toint fpiu_result.exc := fpiu_out.bits.exc val fpmu = Module(new tile.FPToFP(fpu_latency)) // latency 2 for rocket fpmu.io.in.valid := io.req.valid && fp_ctrl.fastpipe fpmu.io.in.bits := fpiu.io.in.bits fpmu.io.lt := fpiu.io.out.bits.lt val fpmu_double = Pipe(io.req.valid && fp_ctrl.fastpipe, fp_ctrl.typeTagOut === D, fpu_latency).bits // Response (all FP units have been padded out to the same latency) io.resp.valid := fpiu_out.valid || fpmu.io.out.valid || sfma.io.out.valid || dfma.io.out.valid val fpu_out_data = Mux(dfma.io.out.valid, box(dfma.io.out.bits.data, true.B), Mux(sfma.io.out.valid, box(sfma.io.out.bits.data, false.B), Mux(fpiu_out.valid, fpiu_result.data, box(fpmu.io.out.bits.data, fpmu_double)))) val fpu_out_exc = Mux(dfma.io.out.valid, dfma.io.out.bits.exc, Mux(sfma.io.out.valid, sfma.io.out.bits.exc, Mux(fpiu_out.valid, fpiu_result.exc, fpmu.io.out.bits.exc))) io.resp.bits.data := fpu_out_data io.resp.bits.fflags.valid := io.resp.valid io.resp.bits.fflags.bits.flags := fpu_out_exc }
module FMADecoder_3( // @[fpu.scala:123:7] input clock, // @[fpu.scala:123:7] input reset, // @[fpu.scala:123:7] input [6:0] io_uopc, // @[fpu.scala:125:14] output [1:0] io_cmd // @[fpu.scala:125:14] ); wire [6:0] io_uopc_0 = io_uopc; // @[fpu.scala:123:7] wire [6:0] decoder_decoded_plaInput = io_uopc_0; // @[pla.scala:77:22] wire [1:0] decoder_0; // @[Decode.scala:50:77] wire [1:0] io_cmd_0; // @[fpu.scala:123:7] wire [6:0] decoder_decoded_invInputs = ~decoder_decoded_plaInput; // @[pla.scala:77:22, :78:21] wire [1:0] decoder_decoded_invMatrixOutputs; // @[pla.scala:120:37] wire [1:0] decoder_decoded; // @[pla.scala:81:23] assign decoder_0 = decoder_decoded; // @[pla.scala:81:23] wire decoder_decoded_andMatrixOutputs_andMatrixInput_0 = decoder_decoded_invInputs[0]; // @[pla.scala:78:21, :91:29] wire decoder_decoded_andMatrixOutputs_andMatrixInput_0_4 = decoder_decoded_invInputs[0]; // @[pla.scala:78:21, :91:29] wire decoder_decoded_andMatrixOutputs_andMatrixInput_1 = decoder_decoded_invInputs[1]; // @[pla.scala:78:21, :91:29] wire decoder_decoded_andMatrixOutputs_andMatrixInput_1_4 = decoder_decoded_invInputs[1]; // @[pla.scala:78:21, :91:29] wire decoder_decoded_andMatrixOutputs_andMatrixInput_2 = decoder_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29] wire decoder_decoded_andMatrixOutputs_andMatrixInput_1_5 = decoder_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29] wire decoder_decoded_andMatrixOutputs_andMatrixInput_2_6 = decoder_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29] wire decoder_decoded_andMatrixOutputs_andMatrixInput_3 = decoder_decoded_plaInput[3]; // @[pla.scala:77:22, :90:45] wire decoder_decoded_andMatrixOutputs_andMatrixInput_2_1 = decoder_decoded_plaInput[3]; // @[pla.scala:77:22, :90:45] wire decoder_decoded_andMatrixOutputs_andMatrixInput_2_2 = decoder_decoded_plaInput[3]; // @[pla.scala:77:22, :90:45] wire decoder_decoded_andMatrixOutputs_andMatrixInput_3_3 = decoder_decoded_plaInput[3]; // @[pla.scala:77:22, :90:45] wire decoder_decoded_andMatrixOutputs_andMatrixInput_4 = decoder_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45] wire decoder_decoded_andMatrixOutputs_andMatrixInput_3_1 = decoder_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45] wire decoder_decoded_andMatrixOutputs_andMatrixInput_3_2 = decoder_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45] wire decoder_decoded_andMatrixOutputs_andMatrixInput_4_3 = decoder_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45] wire decoder_decoded_andMatrixOutputs_andMatrixInput_5 = decoder_decoded_invInputs[5]; // @[pla.scala:78:21, :91:29] wire decoder_decoded_andMatrixOutputs_andMatrixInput_4_1 = decoder_decoded_invInputs[5]; // @[pla.scala:78:21, :91:29] wire decoder_decoded_andMatrixOutputs_andMatrixInput_4_2 = decoder_decoded_invInputs[5]; // @[pla.scala:78:21, :91:29] wire decoder_decoded_andMatrixOutputs_andMatrixInput_5_3 = decoder_decoded_invInputs[5]; // @[pla.scala:78:21, :91:29] wire decoder_decoded_andMatrixOutputs_andMatrixInput_6 = decoder_decoded_plaInput[6]; // @[pla.scala:77:22, :90:45] wire decoder_decoded_andMatrixOutputs_andMatrixInput_5_1 = decoder_decoded_plaInput[6]; // @[pla.scala:77:22, :90:45] wire decoder_decoded_andMatrixOutputs_andMatrixInput_5_2 = decoder_decoded_plaInput[6]; // @[pla.scala:77:22, :90:45] wire decoder_decoded_andMatrixOutputs_andMatrixInput_6_1 = decoder_decoded_plaInput[6]; // @[pla.scala:77:22, :90:45] wire decoder_decoded_andMatrixOutputs_andMatrixInput_5_4 = decoder_decoded_plaInput[6]; // @[pla.scala:77:22, :90:45] wire 
decoder_decoded_andMatrixOutputs_andMatrixInput_5_5 = decoder_decoded_plaInput[6]; // @[pla.scala:77:22, :90:45] wire decoder_decoded_andMatrixOutputs_andMatrixInput_6_2 = decoder_decoded_plaInput[6]; // @[pla.scala:77:22, :90:45] wire [1:0] decoder_decoded_andMatrixOutputs_lo_hi = {decoder_decoded_andMatrixOutputs_andMatrixInput_4, decoder_decoded_andMatrixOutputs_andMatrixInput_5}; // @[pla.scala:90:45, :91:29, :98:53] wire [2:0] decoder_decoded_andMatrixOutputs_lo = {decoder_decoded_andMatrixOutputs_lo_hi, decoder_decoded_andMatrixOutputs_andMatrixInput_6}; // @[pla.scala:90:45, :98:53] wire [1:0] decoder_decoded_andMatrixOutputs_hi_lo = {decoder_decoded_andMatrixOutputs_andMatrixInput_2, decoder_decoded_andMatrixOutputs_andMatrixInput_3}; // @[pla.scala:90:45, :91:29, :98:53] wire [1:0] decoder_decoded_andMatrixOutputs_hi_hi = {decoder_decoded_andMatrixOutputs_andMatrixInput_0, decoder_decoded_andMatrixOutputs_andMatrixInput_1}; // @[pla.scala:91:29, :98:53] wire [3:0] decoder_decoded_andMatrixOutputs_hi = {decoder_decoded_andMatrixOutputs_hi_hi, decoder_decoded_andMatrixOutputs_hi_lo}; // @[pla.scala:98:53] wire [6:0] _decoder_decoded_andMatrixOutputs_T = {decoder_decoded_andMatrixOutputs_hi, decoder_decoded_andMatrixOutputs_lo}; // @[pla.scala:98:53] wire decoder_decoded_andMatrixOutputs_6_2 = &_decoder_decoded_andMatrixOutputs_T; // @[pla.scala:98:{53,70}] wire decoder_decoded_andMatrixOutputs_andMatrixInput_0_1 = decoder_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45] wire decoder_decoded_andMatrixOutputs_andMatrixInput_0_3 = decoder_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45] wire decoder_decoded_andMatrixOutputs_andMatrixInput_0_6 = decoder_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45] wire decoder_decoded_andMatrixOutputs_andMatrixInput_1_1 = decoder_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire decoder_decoded_andMatrixOutputs_andMatrixInput_0_2 = decoder_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire decoder_decoded_andMatrixOutputs_andMatrixInput_1_3 = decoder_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire decoder_decoded_andMatrixOutputs_andMatrixInput_0_5 = decoder_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire decoder_decoded_andMatrixOutputs_andMatrixInput_1_6 = decoder_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45] wire [1:0] decoder_decoded_andMatrixOutputs_lo_hi_1 = {decoder_decoded_andMatrixOutputs_andMatrixInput_3_1, decoder_decoded_andMatrixOutputs_andMatrixInput_4_1}; // @[pla.scala:90:45, :91:29, :98:53] wire [2:0] decoder_decoded_andMatrixOutputs_lo_1 = {decoder_decoded_andMatrixOutputs_lo_hi_1, decoder_decoded_andMatrixOutputs_andMatrixInput_5_1}; // @[pla.scala:90:45, :98:53] wire [1:0] decoder_decoded_andMatrixOutputs_hi_hi_1 = {decoder_decoded_andMatrixOutputs_andMatrixInput_0_1, decoder_decoded_andMatrixOutputs_andMatrixInput_1_1}; // @[pla.scala:90:45, :98:53] wire [2:0] decoder_decoded_andMatrixOutputs_hi_1 = {decoder_decoded_andMatrixOutputs_hi_hi_1, decoder_decoded_andMatrixOutputs_andMatrixInput_2_1}; // @[pla.scala:90:45, :98:53] wire [5:0] _decoder_decoded_andMatrixOutputs_T_1 = {decoder_decoded_andMatrixOutputs_hi_1, decoder_decoded_andMatrixOutputs_lo_1}; // @[pla.scala:98:53] wire decoder_decoded_andMatrixOutputs_0_2 = &_decoder_decoded_andMatrixOutputs_T_1; // @[pla.scala:98:{53,70}] wire decoder_decoded_andMatrixOutputs_andMatrixInput_1_2 = decoder_decoded_plaInput[2]; // @[pla.scala:77:22, :90:45] wire decoder_decoded_andMatrixOutputs_andMatrixInput_2_3 = decoder_decoded_plaInput[2]; // 
@[pla.scala:77:22, :90:45] wire [1:0] decoder_decoded_andMatrixOutputs_lo_hi_2 = {decoder_decoded_andMatrixOutputs_andMatrixInput_3_2, decoder_decoded_andMatrixOutputs_andMatrixInput_4_2}; // @[pla.scala:90:45, :91:29, :98:53] wire [2:0] decoder_decoded_andMatrixOutputs_lo_2 = {decoder_decoded_andMatrixOutputs_lo_hi_2, decoder_decoded_andMatrixOutputs_andMatrixInput_5_2}; // @[pla.scala:90:45, :98:53] wire [1:0] decoder_decoded_andMatrixOutputs_hi_hi_2 = {decoder_decoded_andMatrixOutputs_andMatrixInput_0_2, decoder_decoded_andMatrixOutputs_andMatrixInput_1_2}; // @[pla.scala:90:45, :98:53] wire [2:0] decoder_decoded_andMatrixOutputs_hi_2 = {decoder_decoded_andMatrixOutputs_hi_hi_2, decoder_decoded_andMatrixOutputs_andMatrixInput_2_2}; // @[pla.scala:90:45, :98:53] wire [5:0] _decoder_decoded_andMatrixOutputs_T_2 = {decoder_decoded_andMatrixOutputs_hi_2, decoder_decoded_andMatrixOutputs_lo_2}; // @[pla.scala:98:53] wire decoder_decoded_andMatrixOutputs_1_2 = &_decoder_decoded_andMatrixOutputs_T_2; // @[pla.scala:98:{53,70}] wire [1:0] decoder_decoded_andMatrixOutputs_lo_hi_3 = {decoder_decoded_andMatrixOutputs_andMatrixInput_4_3, decoder_decoded_andMatrixOutputs_andMatrixInput_5_3}; // @[pla.scala:90:45, :91:29, :98:53] wire [2:0] decoder_decoded_andMatrixOutputs_lo_3 = {decoder_decoded_andMatrixOutputs_lo_hi_3, decoder_decoded_andMatrixOutputs_andMatrixInput_6_1}; // @[pla.scala:90:45, :98:53] wire [1:0] decoder_decoded_andMatrixOutputs_hi_lo_1 = {decoder_decoded_andMatrixOutputs_andMatrixInput_2_3, decoder_decoded_andMatrixOutputs_andMatrixInput_3_3}; // @[pla.scala:90:45, :98:53] wire [1:0] decoder_decoded_andMatrixOutputs_hi_hi_3 = {decoder_decoded_andMatrixOutputs_andMatrixInput_0_3, decoder_decoded_andMatrixOutputs_andMatrixInput_1_3}; // @[pla.scala:90:45, :98:53] wire [3:0] decoder_decoded_andMatrixOutputs_hi_3 = {decoder_decoded_andMatrixOutputs_hi_hi_3, decoder_decoded_andMatrixOutputs_hi_lo_1}; // @[pla.scala:98:53] wire [6:0] _decoder_decoded_andMatrixOutputs_T_3 = {decoder_decoded_andMatrixOutputs_hi_3, decoder_decoded_andMatrixOutputs_lo_3}; // @[pla.scala:98:53] wire decoder_decoded_andMatrixOutputs_2_2 = &_decoder_decoded_andMatrixOutputs_T_3; // @[pla.scala:98:{53,70}] wire decoder_decoded_andMatrixOutputs_andMatrixInput_2_4 = decoder_decoded_invInputs[3]; // @[pla.scala:78:21, :91:29] wire decoder_decoded_andMatrixOutputs_andMatrixInput_2_5 = decoder_decoded_invInputs[3]; // @[pla.scala:78:21, :91:29] wire decoder_decoded_andMatrixOutputs_andMatrixInput_3_6 = decoder_decoded_invInputs[3]; // @[pla.scala:78:21, :91:29] wire decoder_decoded_andMatrixOutputs_andMatrixInput_3_4 = decoder_decoded_invInputs[4]; // @[pla.scala:78:21, :91:29] wire decoder_decoded_andMatrixOutputs_andMatrixInput_3_5 = decoder_decoded_invInputs[4]; // @[pla.scala:78:21, :91:29] wire decoder_decoded_andMatrixOutputs_andMatrixInput_4_6 = decoder_decoded_invInputs[4]; // @[pla.scala:78:21, :91:29] wire decoder_decoded_andMatrixOutputs_andMatrixInput_4_4 = decoder_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45] wire decoder_decoded_andMatrixOutputs_andMatrixInput_4_5 = decoder_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45] wire decoder_decoded_andMatrixOutputs_andMatrixInput_5_6 = decoder_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45] wire [1:0] decoder_decoded_andMatrixOutputs_lo_hi_4 = {decoder_decoded_andMatrixOutputs_andMatrixInput_3_4, decoder_decoded_andMatrixOutputs_andMatrixInput_4_4}; // @[pla.scala:90:45, :91:29, :98:53] wire [2:0] decoder_decoded_andMatrixOutputs_lo_4 = 
{decoder_decoded_andMatrixOutputs_lo_hi_4, decoder_decoded_andMatrixOutputs_andMatrixInput_5_4}; // @[pla.scala:90:45, :98:53] wire [1:0] decoder_decoded_andMatrixOutputs_hi_hi_4 = {decoder_decoded_andMatrixOutputs_andMatrixInput_0_4, decoder_decoded_andMatrixOutputs_andMatrixInput_1_4}; // @[pla.scala:91:29, :98:53] wire [2:0] decoder_decoded_andMatrixOutputs_hi_4 = {decoder_decoded_andMatrixOutputs_hi_hi_4, decoder_decoded_andMatrixOutputs_andMatrixInput_2_4}; // @[pla.scala:91:29, :98:53] wire [5:0] _decoder_decoded_andMatrixOutputs_T_4 = {decoder_decoded_andMatrixOutputs_hi_4, decoder_decoded_andMatrixOutputs_lo_4}; // @[pla.scala:98:53] wire decoder_decoded_andMatrixOutputs_4_2 = &_decoder_decoded_andMatrixOutputs_T_4; // @[pla.scala:98:{53,70}] wire [1:0] decoder_decoded_andMatrixOutputs_lo_hi_5 = {decoder_decoded_andMatrixOutputs_andMatrixInput_3_5, decoder_decoded_andMatrixOutputs_andMatrixInput_4_5}; // @[pla.scala:90:45, :91:29, :98:53] wire [2:0] decoder_decoded_andMatrixOutputs_lo_5 = {decoder_decoded_andMatrixOutputs_lo_hi_5, decoder_decoded_andMatrixOutputs_andMatrixInput_5_5}; // @[pla.scala:90:45, :98:53] wire [1:0] decoder_decoded_andMatrixOutputs_hi_hi_5 = {decoder_decoded_andMatrixOutputs_andMatrixInput_0_5, decoder_decoded_andMatrixOutputs_andMatrixInput_1_5}; // @[pla.scala:90:45, :91:29, :98:53] wire [2:0] decoder_decoded_andMatrixOutputs_hi_5 = {decoder_decoded_andMatrixOutputs_hi_hi_5, decoder_decoded_andMatrixOutputs_andMatrixInput_2_5}; // @[pla.scala:91:29, :98:53] wire [5:0] _decoder_decoded_andMatrixOutputs_T_5 = {decoder_decoded_andMatrixOutputs_hi_5, decoder_decoded_andMatrixOutputs_lo_5}; // @[pla.scala:98:53] wire decoder_decoded_andMatrixOutputs_3_2 = &_decoder_decoded_andMatrixOutputs_T_5; // @[pla.scala:98:{53,70}] wire [1:0] decoder_decoded_andMatrixOutputs_lo_hi_6 = {decoder_decoded_andMatrixOutputs_andMatrixInput_4_6, decoder_decoded_andMatrixOutputs_andMatrixInput_5_6}; // @[pla.scala:90:45, :91:29, :98:53] wire [2:0] decoder_decoded_andMatrixOutputs_lo_6 = {decoder_decoded_andMatrixOutputs_lo_hi_6, decoder_decoded_andMatrixOutputs_andMatrixInput_6_2}; // @[pla.scala:90:45, :98:53] wire [1:0] decoder_decoded_andMatrixOutputs_hi_lo_2 = {decoder_decoded_andMatrixOutputs_andMatrixInput_2_6, decoder_decoded_andMatrixOutputs_andMatrixInput_3_6}; // @[pla.scala:91:29, :98:53] wire [1:0] decoder_decoded_andMatrixOutputs_hi_hi_6 = {decoder_decoded_andMatrixOutputs_andMatrixInput_0_6, decoder_decoded_andMatrixOutputs_andMatrixInput_1_6}; // @[pla.scala:90:45, :98:53] wire [3:0] decoder_decoded_andMatrixOutputs_hi_6 = {decoder_decoded_andMatrixOutputs_hi_hi_6, decoder_decoded_andMatrixOutputs_hi_lo_2}; // @[pla.scala:98:53] wire [6:0] _decoder_decoded_andMatrixOutputs_T_6 = {decoder_decoded_andMatrixOutputs_hi_6, decoder_decoded_andMatrixOutputs_lo_6}; // @[pla.scala:98:53] wire decoder_decoded_andMatrixOutputs_5_2 = &_decoder_decoded_andMatrixOutputs_T_6; // @[pla.scala:98:{53,70}] wire [1:0] decoder_decoded_orMatrixOutputs_lo = {decoder_decoded_andMatrixOutputs_1_2, decoder_decoded_andMatrixOutputs_3_2}; // @[pla.scala:98:70, :114:19] wire [1:0] decoder_decoded_orMatrixOutputs_hi = {decoder_decoded_andMatrixOutputs_6_2, decoder_decoded_andMatrixOutputs_0_2}; // @[pla.scala:98:70, :114:19] wire [3:0] _decoder_decoded_orMatrixOutputs_T = {decoder_decoded_orMatrixOutputs_hi, decoder_decoded_orMatrixOutputs_lo}; // @[pla.scala:114:19] wire _decoder_decoded_orMatrixOutputs_T_1 = |_decoder_decoded_orMatrixOutputs_T; // @[pla.scala:114:{19,36}] wire [1:0] 
decoder_decoded_orMatrixOutputs_hi_1 = {decoder_decoded_andMatrixOutputs_2_2, decoder_decoded_andMatrixOutputs_4_2}; // @[pla.scala:98:70, :114:19] wire [2:0] _decoder_decoded_orMatrixOutputs_T_2 = {decoder_decoded_orMatrixOutputs_hi_1, decoder_decoded_andMatrixOutputs_5_2}; // @[pla.scala:98:70, :114:19] wire _decoder_decoded_orMatrixOutputs_T_3 = |_decoder_decoded_orMatrixOutputs_T_2; // @[pla.scala:114:{19,36}] wire [1:0] decoder_decoded_orMatrixOutputs = {_decoder_decoded_orMatrixOutputs_T_3, _decoder_decoded_orMatrixOutputs_T_1}; // @[pla.scala:102:36, :114:36] wire _decoder_decoded_invMatrixOutputs_T = decoder_decoded_orMatrixOutputs[0]; // @[pla.scala:102:36, :124:31] wire _decoder_decoded_invMatrixOutputs_T_1 = decoder_decoded_orMatrixOutputs[1]; // @[pla.scala:102:36, :124:31] assign decoder_decoded_invMatrixOutputs = {_decoder_decoded_invMatrixOutputs_T_1, _decoder_decoded_invMatrixOutputs_T}; // @[pla.scala:120:37, :124:31] assign decoder_decoded = decoder_decoded_invMatrixOutputs; // @[pla.scala:81:23, :120:37] assign io_cmd_0 = decoder_0; // @[Decode.scala:50:77] assign io_cmd = io_cmd_0; // @[fpu.scala:123:7] endmodule
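Aside: the FMADecoder_3 module above is the PLA that rocket.DecodeLogic emits for the FMADecoder truth table; each andMatrix term matches a group of uopc encodings and the orMatrix ORs them into the two io_cmd bits. A minimal behavioral sketch of the same cmd mapping is given below as a self-contained Chisel module. The opcode constants are hypothetical placeholders invented for this example only, not BOOM's actual uopc encodings.

// Illustrative sketch only: mirrors the cmd column of the FMADecoder table above,
// assuming made-up opcode values (BOOM's real uopc constants are not used here).
import chisel3._
import chisel3.util._

class SimpleFMACmdDecoder extends Module {
  val io = IO(new Bundle {
    val uopc = Input(UInt(7.W))
    val cmd  = Output(UInt(2.W))
  })
  // Hypothetical opcode encodings, used only in this sketch.
  val FADD   = 1.U(7.W)
  val FSUB   = 2.U(7.W)
  val FMUL   = 3.U(7.W)
  val FMADD  = 4.U(7.W)
  val FMSUB  = 5.U(7.W)
  val FNMADD = 6.U(7.W)
  val FNMSUB = 7.U(7.W)

  io.cmd := "b00".U                                            // FADD, FMUL, FMADD rows
  when (io.uopc === FSUB || io.uopc === FMSUB) { io.cmd := "b01".U }
  when (io.uopc === FNMSUB)                    { io.cmd := "b10".U }
  when (io.uopc === FNMADD)                    { io.cmd := "b11".U }
}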
Generate the Verilog code corresponding to the following Chisel files. File ShiftRegisterPriorityQueue.scala: package compressacc import chisel3._ import chisel3.util._ import chisel3.util._ // TODO : support enq & deq at the same cycle class PriorityQueueStageIO(keyWidth: Int, value: ValueInfo) extends Bundle { val output_prev = KeyValue(keyWidth, value) val output_nxt = KeyValue(keyWidth, value) val input_prev = Flipped(KeyValue(keyWidth, value)) val input_nxt = Flipped(KeyValue(keyWidth, value)) val cmd = Flipped(Valid(UInt(1.W))) val insert_here = Input(Bool()) val cur_input_keyval = Flipped(KeyValue(keyWidth, value)) val cur_output_keyval = KeyValue(keyWidth, value) } class PriorityQueueStage(keyWidth: Int, value: ValueInfo) extends Module { val io = IO(new PriorityQueueStageIO(keyWidth, value)) dontTouch(io) val CMD_DEQ = 0.U val CMD_ENQ = 1.U val MAX_VALUE = (1 << keyWidth) - 1 val key_reg = RegInit(MAX_VALUE.U(keyWidth.W)) val value_reg = Reg(value) io.output_prev.key := key_reg io.output_prev.value := value_reg io.output_nxt.key := key_reg io.output_nxt.value := value_reg io.cur_output_keyval.key := key_reg io.cur_output_keyval.value := value_reg when (io.cmd.valid) { switch (io.cmd.bits) { is (CMD_DEQ) { key_reg := io.input_nxt.key value_reg := io.input_nxt.value } is (CMD_ENQ) { when (io.insert_here) { key_reg := io.cur_input_keyval.key value_reg := io.cur_input_keyval.value } .elsewhen (key_reg >= io.cur_input_keyval.key) { key_reg := io.input_prev.key value_reg := io.input_prev.value } .otherwise { // do nothing } } } } } object PriorityQueueStage { def apply(keyWidth: Int, v: ValueInfo): PriorityQueueStage = new PriorityQueueStage(keyWidth, v) } // TODO // - This design is not scalable as the enqued_keyval is broadcasted to all the stages // - Add pipeline registers later class PriorityQueueIO(queSize: Int, keyWidth: Int, value: ValueInfo) extends Bundle { val cnt_bits = log2Ceil(queSize+1) val counter = Output(UInt(cnt_bits.W)) val enq = Flipped(Decoupled(KeyValue(keyWidth, value))) val deq = Decoupled(KeyValue(keyWidth, value)) } class PriorityQueue(queSize: Int, keyWidth: Int, value: ValueInfo) extends Module { val keyWidthInternal = keyWidth + 1 val CMD_DEQ = 0.U val CMD_ENQ = 1.U val io = IO(new PriorityQueueIO(queSize, keyWidthInternal, value)) dontTouch(io) val MAX_VALUE = ((1 << keyWidthInternal) - 1).U val cnt_bits = log2Ceil(queSize+1) // do not consider cases where we are inserting more entries then the queSize val counter = RegInit(0.U(cnt_bits.W)) io.counter := counter val full = (counter === queSize.U) val empty = (counter === 0.U) io.deq.valid := !empty io.enq.ready := !full when (io.enq.fire) { counter := counter + 1.U } when (io.deq.fire) { counter := counter - 1.U } val cmd_valid = io.enq.valid || io.deq.ready val cmd = Mux(io.enq.valid, CMD_ENQ, CMD_DEQ) assert(!(io.enq.valid && io.deq.ready)) val stages = Seq.fill(queSize)(Module(new PriorityQueueStage(keyWidthInternal, value))) for (i <- 0 until (queSize - 1)) { stages(i+1).io.input_prev <> stages(i).io.output_nxt stages(i).io.input_nxt <> stages(i+1).io.output_prev } stages(queSize-1).io.input_nxt.key := MAX_VALUE // stages(queSize-1).io.input_nxt.value := stages(queSize-1).io.input_nxt.value.symbol := 0.U // stages(queSize-1).io.input_nxt.value.child(0) := 0.U // stages(queSize-1).io.input_nxt.value.child(1) := 0.U stages(0).io.input_prev.key := io.enq.bits.key stages(0).io.input_prev.value <> io.enq.bits.value for (i <- 0 until queSize) { stages(i).io.cmd.valid := cmd_valid stages(i).io.cmd.bits := cmd 
stages(i).io.cur_input_keyval <> io.enq.bits } val is_large_or_equal = WireInit(VecInit(Seq.fill(queSize)(false.B))) for (i <- 0 until queSize) { is_large_or_equal(i) := (stages(i).io.cur_output_keyval.key >= io.enq.bits.key) } val is_large_or_equal_cat = Wire(UInt(queSize.W)) is_large_or_equal_cat := Cat(is_large_or_equal.reverse) val insert_here_idx = PriorityEncoder(is_large_or_equal_cat) for (i <- 0 until queSize) { when (i.U === insert_here_idx) { stages(i).io.insert_here := true.B } .otherwise { stages(i).io.insert_here := false.B } } io.deq.bits <> stages(0).io.output_prev }
module PriorityQueueStage_126( // @[ShiftRegisterPriorityQueue.scala:21:7] input clock, // @[ShiftRegisterPriorityQueue.scala:21:7] input reset, // @[ShiftRegisterPriorityQueue.scala:21:7] output [30:0] io_output_prev_key, // @[ShiftRegisterPriorityQueue.scala:22:14] output [9:0] io_output_prev_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14] output [30:0] io_output_nxt_key, // @[ShiftRegisterPriorityQueue.scala:22:14] output [9:0] io_output_nxt_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14] input [30:0] io_input_prev_key, // @[ShiftRegisterPriorityQueue.scala:22:14] input [9:0] io_input_prev_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14] input [30:0] io_input_nxt_key, // @[ShiftRegisterPriorityQueue.scala:22:14] input [9:0] io_input_nxt_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14] input io_cmd_valid, // @[ShiftRegisterPriorityQueue.scala:22:14] input io_cmd_bits, // @[ShiftRegisterPriorityQueue.scala:22:14] input io_insert_here, // @[ShiftRegisterPriorityQueue.scala:22:14] input [30:0] io_cur_input_keyval_key, // @[ShiftRegisterPriorityQueue.scala:22:14] input [9:0] io_cur_input_keyval_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14] output [30:0] io_cur_output_keyval_key, // @[ShiftRegisterPriorityQueue.scala:22:14] output [9:0] io_cur_output_keyval_value_symbol // @[ShiftRegisterPriorityQueue.scala:22:14] ); wire [30:0] io_input_prev_key_0 = io_input_prev_key; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [9:0] io_input_prev_value_symbol_0 = io_input_prev_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [30:0] io_input_nxt_key_0 = io_input_nxt_key; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [9:0] io_input_nxt_value_symbol_0 = io_input_nxt_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7] wire io_cmd_valid_0 = io_cmd_valid; // @[ShiftRegisterPriorityQueue.scala:21:7] wire io_cmd_bits_0 = io_cmd_bits; // @[ShiftRegisterPriorityQueue.scala:21:7] wire io_insert_here_0 = io_insert_here; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [30:0] io_cur_input_keyval_key_0 = io_cur_input_keyval_key; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [9:0] io_cur_input_keyval_value_symbol_0 = io_cur_input_keyval_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [9:0] io_output_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [30:0] io_output_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [9:0] io_output_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [30:0] io_output_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [9:0] io_cur_output_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [30:0] io_cur_output_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7] reg [30:0] key_reg; // @[ShiftRegisterPriorityQueue.scala:30:24] assign io_output_prev_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24] assign io_output_nxt_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24] assign io_cur_output_keyval_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24] reg [9:0] value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:31:22] assign io_output_prev_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22] assign io_output_nxt_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22] assign io_cur_output_keyval_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22] wire _T_2 = 
key_reg >= io_cur_input_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24, :52:30] always @(posedge clock) begin // @[ShiftRegisterPriorityQueue.scala:21:7] if (reset) // @[ShiftRegisterPriorityQueue.scala:21:7] key_reg <= 31'h7FFFFFFF; // @[ShiftRegisterPriorityQueue.scala:30:24] else if (io_cmd_valid_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7] if (io_cmd_bits_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7] if (io_insert_here_0) // @[ShiftRegisterPriorityQueue.scala:21:7] key_reg <= io_cur_input_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24] else if (_T_2) // @[ShiftRegisterPriorityQueue.scala:52:30] key_reg <= io_input_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24] end else // @[ShiftRegisterPriorityQueue.scala:21:7] key_reg <= io_input_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24] end if (io_cmd_valid_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7] if (io_cmd_bits_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7] if (io_insert_here_0) // @[ShiftRegisterPriorityQueue.scala:21:7] value_reg_symbol <= io_cur_input_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22] else if (_T_2) // @[ShiftRegisterPriorityQueue.scala:52:30] value_reg_symbol <= io_input_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22] end else // @[ShiftRegisterPriorityQueue.scala:21:7] value_reg_symbol <= io_input_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22] end // always @(posedge)
assign io_output_prev_key = io_output_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7] assign io_output_prev_value_symbol = io_output_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7] assign io_output_nxt_key = io_output_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7] assign io_output_nxt_value_symbol = io_output_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7] assign io_cur_output_keyval_key = io_cur_output_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7] assign io_cur_output_keyval_value_symbol = io_cur_output_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7] endmodule
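Aside: the enqueue rule implemented by is_large_or_equal / insert_here_idx in the Chisel above keeps the stage keys sorted in ascending order with the minimum at stage 0, which is what io.deq returns. A small software model of that ordering rule follows; it is a hypothetical plain-Scala sketch for illustration, not part of the compressacc sources or the generated hardware.

// Hypothetical software model: insert the new key in front of the first stored
// key that is >= it (mirroring PriorityEncoder over is_large_or_equal), and
// dequeue from the head.
object PriorityQueueModel {
  def enq(q: Vector[Int], key: Int): Vector[Int] = {
    val idx = q.indexWhere(_ >= key)
    val at  = if (idx < 0) q.length else idx
    (q.take(at) :+ key) ++ q.drop(at)
  }

  def deq(q: Vector[Int]): (Int, Vector[Int]) = (q.head, q.tail)

  def main(args: Array[String]): Unit = {
    var q = Vector.empty[Int]
    for (k <- Seq(5, 2, 7, 2)) q = enq(q, k)
    println(q)          // Vector(2, 2, 5, 7): smallest key sits at the head (stage 0)
    println(deq(q)._1)  // 2
  }
}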
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
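// Bookkeeping note (descriptive only, not part of the checks): `inflight` keeps one busy bit per
// source ID, while `inflight_opcodes`/`inflight_sizes` pack one small record per source at bit
// offset (source << log_a_opcode_bus_size) resp. (source << log_a_size_bus_size). Each record is
// ((opcode or size) << 1) | 1, so the low bit marks the entry as valid and an all-zero record means
// "nothing outstanding"; a_opcode_lookup/a_size_lookup above shift the record back down, mask it,
// and drop the valid bit before it is compared against the incoming 'D' response.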
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else {
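// Dynamic-rotate sketch: pad the rotate amount to log2(width) bits, then fold over log2(width)
// conditional stages; stage i rotates by 2^i only when amt(i) is set, so the stages compose into
// a rotation by the full runtime amount.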
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s from high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalone diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Bundles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import freechips.rocketchip.util._ import scala.collection.immutable.ListMap import chisel3.util.Decoupled import chisel3.util.DecoupledIO import chisel3.reflect.DataMirror abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle // common combos in lazy policy: // Put + Acquire // Release + AccessAck object TLMessages { // A B C D E def PutFullData = 0.U // . . => AccessAck def PutPartialData = 1.U // . . => AccessAck def ArithmeticData = 2.U // . . => AccessAckData def LogicalData = 3.U // . . => AccessAckData def Get = 4.U // . . => AccessAckData def Hint = 5.U // . . => HintAck def AcquireBlock = 6.U // . => Grant[Data] def AcquirePerm = 7.U // . => Grant[Data] def Probe = 6.U // . => ProbeAck[Data] def AccessAck = 0.U // . . def AccessAckData = 1.U // . . def HintAck = 2.U // . . def ProbeAck = 4.U // . def ProbeAckData = 5.U // . def Release = 6.U // . => ReleaseAck def ReleaseData = 7.U // . => ReleaseAck def Grant = 4.U // . => GrantAck def GrantData = 5.U // . => GrantAck def ReleaseAck = 6.U // . def GrantAck = 0.U // . def isA(x: UInt) = x <= AcquirePerm def isB(x: UInt) = x <= Probe def isC(x: UInt) = x <= ReleaseData def isD(x: UInt) = x <= ReleaseAck def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant) def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck) def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("AcquireBlock",TLPermissions.PermMsgGrow), ("AcquirePerm",TLPermissions.PermMsgGrow)) def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("Probe",TLPermissions.PermMsgCap)) def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("ProbeAck",TLPermissions.PermMsgReport), ("ProbeAckData",TLPermissions.PermMsgReport), ("Release",TLPermissions.PermMsgReport), ("ReleaseData",TLPermissions.PermMsgReport)) def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("Grant",TLPermissions.PermMsgCap), ("GrantData",TLPermissions.PermMsgCap), ("ReleaseAck",TLPermissions.PermMsgReserved)) } /** * The three primary TileLink permissions are: * (T)runk: the agent is (or is on inwards path to) the global point of
serialization. * (B)ranch: the agent is on an outwards path to * (N)one: * These permissions are permuted by transfer operations in various ways. * Operations can cap permissions, request for them to be grown or shrunk, * or for a report on their current status. */ object TLPermissions { val aWidth = 2 val bdWidth = 2 val cWidth = 3 // Cap types (Grant = new permissions, Probe = permissions <= target) def toT = 0.U(bdWidth.W) def toB = 1.U(bdWidth.W) def toN = 2.U(bdWidth.W) def isCap(x: UInt) = x <= toN // Grow types (Acquire = permissions >= target) def NtoB = 0.U(aWidth.W) def NtoT = 1.U(aWidth.W) def BtoT = 2.U(aWidth.W) def isGrow(x: UInt) = x <= BtoT // Shrink types (ProbeAck, Release) def TtoB = 0.U(cWidth.W) def TtoN = 1.U(cWidth.W) def BtoN = 2.U(cWidth.W) def isShrink(x: UInt) = x <= BtoN // Report types (ProbeAck, Release) def TtoT = 3.U(cWidth.W) def BtoB = 4.U(cWidth.W) def NtoN = 5.U(cWidth.W) def isReport(x: UInt) = x <= NtoN def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT") def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN") def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN") def PermMsgReserved:Seq[String] = Seq("Reserved") } object TLAtomics { val width = 3 // Arithmetic types def MIN = 0.U(width.W) def MAX = 1.U(width.W) def MINU = 2.U(width.W) def MAXU = 3.U(width.W) def ADD = 4.U(width.W) def isArithmetic(x: UInt) = x <= ADD // Logical types def XOR = 0.U(width.W) def OR = 1.U(width.W) def AND = 2.U(width.W) def SWAP = 3.U(width.W) def isLogical(x: UInt) = x <= SWAP def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD") def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP") } object TLHints { val width = 1 def PREFETCH_READ = 0.U(width.W) def PREFETCH_WRITE = 1.U(width.W) def isHints(x: UInt) = x <= PREFETCH_WRITE def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite") } sealed trait TLChannel extends TLBundleBase { val channelName: String } sealed trait TLDataChannel extends TLChannel sealed trait TLAddrChannel extends TLDataChannel final class TLBundleA(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleA_${params.shortName}" val channelName = "'A' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleB(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleB_${params.shortName}" val channelName = "'B' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val address = UInt(params.addressBits.W) // from // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleC(params: TLBundleParameters) extends TLBundleBase(params) with
TLAddrChannel { override def typeName = s"TLBundleC_${params.shortName}" val channelName = "'C' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.cWidth.W) // shrink or report perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleD(params: TLBundleParameters) extends TLBundleBase(params) with TLDataChannel { override def typeName = s"TLBundleD_${params.shortName}" val channelName = "'D' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val sink = UInt(params.sinkBits.W) // from val denied = Bool() // implies corrupt iff *Data val user = BundleMap(params.responseFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleE(params: TLBundleParameters) extends TLBundleBase(params) with TLChannel { override def typeName = s"TLBundleE_${params.shortName}" val channelName = "'E' channel" val sink = UInt(params.sinkBits.W) // to } class TLBundle(val params: TLBundleParameters) extends Record { // Emulate a Bundle with elements abcde or ad depending on params.hasBCE private val optA = Some (Decoupled(new TLBundleA(params))) private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params)))) private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params))) private val optD = Some (Flipped(Decoupled(new TLBundleD(params)))) private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params))) def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params))))) def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params))))) def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params))))) def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params))))) def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params))))) val elements = if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a) else ListMap("d" -> d, "a" -> a) def tieoff(): Unit = { DataMirror.specifiedDirectionOf(a.ready) match { case SpecifiedDirection.Input => a.ready := false.B c.ready := false.B e.ready := false.B b.valid := false.B d.valid := false.B case SpecifiedDirection.Output => a.valid := false.B c.valid := false.B e.valid := false.B b.ready := false.B d.ready := false.B case _ => } } } object TLBundle { def apply(params: TLBundleParameters) = new TLBundle(params) } class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params) { val a = new AsyncBundle(new TLBundleA(params.base), params.async) val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async)) val c = new AsyncBundle(new TLBundleC(params.base), params.async) val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async)) val e = new AsyncBundle(new 
TLBundleE(params.base), params.async) } class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = RationalIO(new TLBundleA(params)) val b = Flipped(RationalIO(new TLBundleB(params))) val c = RationalIO(new TLBundleC(params)) val d = Flipped(RationalIO(new TLBundleD(params))) val e = RationalIO(new TLBundleE(params)) } class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = CreditedIO(new TLBundleA(params)) val b = Flipped(CreditedIO(new TLBundleB(params))) val c = CreditedIO(new TLBundleC(params)) val d = Flipped(CreditedIO(new TLBundleD(params))) val e = CreditedIO(new TLBundleE(params)) } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // A potentially empty inclusive range of 2-powers
[min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max, s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be non-negative, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask marks the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
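// Worked example of the base/mask semantics (illustrative values only):
//   AddressSet(0x1000, 0xfff).contains(0x12ab) // true: (0x12ab ^ 0x1000) & ~0xfff == 0
//   AddressSet(0x1000, 0xfff).contains(0x2000) // false: the mismatch lies outside the mask bits
//   AddressSet(0x1000, 0xfff).overlaps(AddressSet(0x1800, 0xff)) // true: bases agree wherever both masks care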
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there exist C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there exist D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD =>
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := 
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
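Aside: the beat accounting implemented in hardware by numBeats1/firstlastHelper above can be sanity-checked with a small plain-Scala sketch. This is not one of the Chisel input files; the object name, parameters, and values below are illustrative only. A burst of 2^lgSize bytes on a bus carrying beatBytes per cycle needs max(1, 2^lgSize / beatBytes) beats, and the counter tracks the remaining beats down from beats1 = beats - 1.
object BeatCountSketch extends App {
  // beats-1 for a data-carrying message; messages without a data payload are single-beat
  def beats1(lgSize: Int, beatBytes: Int, hasData: Boolean): Int =
    if (hasData) math.max(0, (1 << lgSize) / beatBytes - 1) else 0

  // e.g. a 64-byte PutFullData on an 8-byte-wide bus takes 8 beats, so beats1 = 7
  assert(beats1(lgSize = 6, beatBytes = 8, hasData = true) == 7)
  // a Get carries no data on channel A, so it is a single-beat request
  assert(beats1(lgSize = 6, beatBytes = 8, hasData = false) == 0)

  // Software model of the down-counter in firstlastHelper:
  // `first` is true on beat 0 of a burst, `last` on its final beat.
  var counter = 0
  def fire(b1: Int): (Boolean, Boolean) = {
    val first = counter == 0
    val last  = counter == 1 || b1 == 0
    counter = if (first) b1 else counter - 1
    (first, last)
  }
  val trace = Seq.fill(8)(fire(7)) // one 8-beat burst
  assert(trace.head._1 && !trace.head._2) // first beat: first=true, last=false
  assert(!trace.last._1 && trace.last._2) // final beat: first=false, last=true
}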
module TLMonitor_6( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [6:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [12:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [6:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input [63:0] io_in_d_bits_data // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7] wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7] wire [6:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7] wire [12:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7] wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7] wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7] wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7] wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7] wire [6:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7] wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7] wire io_in_d_bits_sink = 1'h0; // @[Monitor.scala:36:7] wire io_in_d_bits_denied = 1'h0; // @[Monitor.scala:36:7] wire io_in_d_bits_corrupt = 1'h0; // @[Monitor.scala:36:7] wire sink_ok = 1'h0; // @[Monitor.scala:309:31] wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35] wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36] wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25] wire c_first_done = 1'h0; // @[Edges.scala:233:22] wire 
_c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47] wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95] wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71] wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] 
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44] wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36] wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51] wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40] wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55] wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88] wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59] wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14] wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27] wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25] wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21] wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // 
@[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] 
_same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61] wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_39 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_41 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_45 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_47 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_51 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_53 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_57 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_59 = 1'h1; // @[Parameters.scala:57:20] wire c_first = 1'h1; // @[Edges.scala:231:25] wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire c_first_last = 1'h1; // @[Edges.scala:232:33] wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28] wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28] wire [1:0] io_in_d_bits_param = 2'h0; // @[Monitor.scala:36:7] wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // 
@[Bundles.scala:265:74] wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [12:0] _c_first_WIRE_bits_address = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _c_first_WIRE_1_bits_address = 13'h0; // @[Bundles.scala:265:61] wire [12:0] _c_first_WIRE_2_bits_address = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _c_first_WIRE_3_bits_address = 13'h0; // @[Bundles.scala:265:61] wire [12:0] _c_set_wo_ready_WIRE_bits_address = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _c_set_wo_ready_WIRE_1_bits_address = 13'h0; // @[Bundles.scala:265:61] wire [12:0] _c_set_WIRE_bits_address = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _c_set_WIRE_1_bits_address = 13'h0; // @[Bundles.scala:265:61] wire [12:0] _c_opcodes_set_interm_WIRE_bits_address = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _c_opcodes_set_interm_WIRE_1_bits_address = 13'h0; // @[Bundles.scala:265:61] wire [12:0] _c_sizes_set_interm_WIRE_bits_address = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _c_sizes_set_interm_WIRE_1_bits_address = 13'h0; // @[Bundles.scala:265:61] wire [12:0] _c_opcodes_set_WIRE_bits_address = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _c_opcodes_set_WIRE_1_bits_address = 13'h0; // @[Bundles.scala:265:61] wire [12:0] _c_sizes_set_WIRE_bits_address = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _c_sizes_set_WIRE_1_bits_address = 13'h0; // @[Bundles.scala:265:61] wire [12:0] _c_probe_ack_WIRE_bits_address = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _c_probe_ack_WIRE_1_bits_address = 13'h0; // @[Bundles.scala:265:61] wire [12:0] _c_probe_ack_WIRE_2_bits_address = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _c_probe_ack_WIRE_3_bits_address = 13'h0; // @[Bundles.scala:265:61] wire [12:0] _same_cycle_resp_WIRE_bits_address = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _same_cycle_resp_WIRE_1_bits_address = 13'h0; // @[Bundles.scala:265:61] wire [12:0] _same_cycle_resp_WIRE_2_bits_address = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _same_cycle_resp_WIRE_3_bits_address = 13'h0; // @[Bundles.scala:265:61] wire [12:0] _same_cycle_resp_WIRE_4_bits_address = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _same_cycle_resp_WIRE_5_bits_address = 13'h0; // @[Bundles.scala:265:61] wire [6:0] _c_first_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_first_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_first_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_first_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_set_wo_ready_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_set_wo_ready_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] 
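  // Note: the _c_* and _same_cycle_resp_* wires in this block are tied to constant zero.
  // This monitor instance exposes only the A and D channels, so the C-channel
  // (Release/ProbeAck) inflight tracking from Monitor.scala has no traffic to observe
  // and reduces to these constant tie-offs.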
wire [6:0] _c_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_opcodes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_opcodes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_sizes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_sizes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_opcodes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_opcodes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_sizes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_sizes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_probe_ack_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_probe_ack_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_probe_ack_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_probe_ack_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _same_cycle_resp_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _same_cycle_resp_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _same_cycle_resp_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _same_cycle_resp_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _same_cycle_resp_WIRE_4_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _same_cycle_resp_WIRE_5_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [1026:0] _c_opcodes_set_T_1 = 1027'h0; // @[Monitor.scala:767:54] wire [1026:0] _c_sizes_set_T_1 = 1027'h0; // @[Monitor.scala:768:52] wire [9:0] _c_opcodes_set_T = 10'h0; // @[Monitor.scala:767:79] wire [9:0] _c_sizes_set_T = 10'h0; // @[Monitor.scala:768:77] wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61] wire [3:0] _c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59] wire 
[3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40] wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40] wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53] wire [3:0] _c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51] wire [127:0] _c_set_wo_ready_T = 128'h1; // @[OneHot.scala:58:35] wire [127:0] _c_set_T = 128'h1; // @[OneHot.scala:58:35] wire [259:0] c_opcodes_set = 260'h0; // @[Monitor.scala:740:34] wire [259:0] c_sizes_set = 260'h0; // @[Monitor.scala:741:34] wire [64:0] c_set = 65'h0; // @[Monitor.scala:738:34] wire [64:0] c_set_wo_ready = 65'h0; // @[Monitor.scala:739:34] wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46] wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76] wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71] wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42] wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42] wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42] wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123] wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117] wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48] wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48] wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123] wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119] wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48] wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48] wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34] wire [6:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] 
_uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_36 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_37 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_38 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_39 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_40 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_41 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_42 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_43 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_4 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_5 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire _source_ok_T = io_in_a_bits_source_0 == 7'h10; // @[Monitor.scala:36:7] wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] _source_ok_T_1 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_7 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_13 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_19 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire _source_ok_T_2 = _source_ok_T_1 == 5'h0; // 
@[Parameters.scala:54:{10,32}] wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_8 = _source_ok_T_7 == 5'h1; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_14 = _source_ok_T_13 == 5'h2; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_20 = _source_ok_T_19 == 5'h3; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31] wire _source_ok_T_25 = io_in_a_bits_source_0 == 7'h22; // @[Monitor.scala:36:7] wire _source_ok_WIRE_5 = _source_ok_T_25; // @[Parameters.scala:1138:31] wire _source_ok_T_26 = io_in_a_bits_source_0 == 7'h21; // @[Monitor.scala:36:7] wire _source_ok_WIRE_6 = _source_ok_T_26; // @[Parameters.scala:1138:31] wire _source_ok_T_27 = io_in_a_bits_source_0 == 7'h20; // @[Monitor.scala:36:7] wire _source_ok_WIRE_7 = _source_ok_T_27; // @[Parameters.scala:1138:31] wire _source_ok_T_28 = io_in_a_bits_source_0 == 7'h40; // @[Monitor.scala:36:7] wire _source_ok_WIRE_8 = _source_ok_T_28; // @[Parameters.scala:1138:31] wire _source_ok_T_29 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_30 = _source_ok_T_29 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_31 = _source_ok_T_30 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_32 = _source_ok_T_31 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_33 = _source_ok_T_32 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_34 = _source_ok_T_33 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_35 = _source_ok_T_34 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46] wire source_ok = _source_ok_T_35 | _source_ok_WIRE_8; // @[Parameters.scala:1138:31, :1139:46] wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71] wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71] assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71] wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71] assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71] wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71] assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71] wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}] wire [12:0] 
_is_aligned_T = {7'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46] wire is_aligned = _is_aligned_T == 13'h0; // @[Edges.scala:21:{16,24}] wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49] wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}] wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27] wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21] wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26] wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26] wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}] wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}] wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26] wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26] wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}] wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26] wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26] wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20] wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}] wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_2 = mask_sub_1_1 | 
_mask_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}] wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}] wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}] wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}] wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10] wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10] wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10] wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_4 = _uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_5 = _uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_6 = _uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_7 = _uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_9 = _uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_10 = _uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_11 = _uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_12 = _uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_13 = _uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_14 = _uncommonBits_T_14[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_16 = _uncommonBits_T_16[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_17 = _uncommonBits_T_17[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_18 = _uncommonBits_T_18[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_19 = _uncommonBits_T_19[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_20 = 
_uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_24 = _uncommonBits_T_24[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_25 = _uncommonBits_T_25[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_28 = _uncommonBits_T_28[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_29 = _uncommonBits_T_29[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_30 = _uncommonBits_T_30[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_31 = _uncommonBits_T_31[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_34 = _uncommonBits_T_34[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_36 = _uncommonBits_T_36[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_37 = _uncommonBits_T_37[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_38 = _uncommonBits_T_38[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_39 = _uncommonBits_T_39[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_40 = _uncommonBits_T_40[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_41 = _uncommonBits_T_41[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_42 = _uncommonBits_T_42[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_43 = _uncommonBits_T_43[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_36 = io_in_d_bits_source_0 == 7'h10; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_0 = _source_ok_T_36; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] _source_ok_T_37 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_43 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_49 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_55 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire _source_ok_T_38 = _source_ok_T_37 == 5'h0; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_40 = _source_ok_T_38; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_42 = _source_ok_T_40; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_1 = _source_ok_T_42; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_44 = _source_ok_T_43 == 5'h1; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_46 = _source_ok_T_44; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_48 = _source_ok_T_46; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_2 = _source_ok_T_48; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_50 = _source_ok_T_49 == 5'h2; // 
@[Parameters.scala:54:{10,32}] wire _source_ok_T_52 = _source_ok_T_50; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_54 = _source_ok_T_52; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_3 = _source_ok_T_54; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_56 = _source_ok_T_55 == 5'h3; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_58 = _source_ok_T_56; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_60 = _source_ok_T_58; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_4 = _source_ok_T_60; // @[Parameters.scala:1138:31] wire _source_ok_T_61 = io_in_d_bits_source_0 == 7'h22; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_5 = _source_ok_T_61; // @[Parameters.scala:1138:31] wire _source_ok_T_62 = io_in_d_bits_source_0 == 7'h21; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_6 = _source_ok_T_62; // @[Parameters.scala:1138:31] wire _source_ok_T_63 = io_in_d_bits_source_0 == 7'h20; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_7 = _source_ok_T_63; // @[Parameters.scala:1138:31] wire _source_ok_T_64 = io_in_d_bits_source_0 == 7'h40; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_8 = _source_ok_T_64; // @[Parameters.scala:1138:31] wire _source_ok_T_65 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_66 = _source_ok_T_65 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_67 = _source_ok_T_66 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_68 = _source_ok_T_67 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_69 = _source_ok_T_68 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_70 = _source_ok_T_69 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_71 = _source_ok_T_70 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46] wire source_ok_1 = _source_ok_T_71 | _source_ok_WIRE_1_8; // @[Parameters.scala:1138:31, :1139:46] wire _T_1094 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35] wire _a_first_T; // @[Decoupled.scala:51:35] assign _a_first_T = _T_1094; // @[Decoupled.scala:51:35] wire _a_first_T_1; // @[Decoupled.scala:51:35] assign _a_first_T_1 = _T_1094; // @[Decoupled.scala:51:35] wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46] wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}] wire [2:0] a_first_beats1 = a_first_beats1_opdata ? 
a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [2:0] a_first_counter; // @[Edges.scala:229:27] wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28] wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35] wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27] wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [2:0] size; // @[Monitor.scala:389:22] reg [6:0] source; // @[Monitor.scala:390:22] reg [12:0] address; // @[Monitor.scala:391:22] wire _T_1162 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35] wire _d_first_T; // @[Decoupled.scala:51:35] assign _d_first_T = _T_1162; // @[Decoupled.scala:51:35] wire _d_first_T_1; // @[Decoupled.scala:51:35] assign _d_first_T_1 = _T_1162; // @[Decoupled.scala:51:35] wire _d_first_T_2; // @[Decoupled.scala:51:35] assign _d_first_T_2 = _T_1162; // @[Decoupled.scala:51:35] wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71] assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71] assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71] assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71] wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46] wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire [2:0] d_first_beats1 = d_first_beats1_opdata ? 
d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28] wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [2:0] size_1; // @[Monitor.scala:540:22] reg [6:0] source_1; // @[Monitor.scala:541:22] reg [64:0] inflight; // @[Monitor.scala:614:27] reg [259:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [259:0] inflight_sizes; // @[Monitor.scala:618:33] wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46] wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}] wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [2:0] a_first_counter_1; // @[Edges.scala:229:27] wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28] wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46] wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? 
d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter_1; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28] wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [64:0] a_set; // @[Monitor.scala:626:34] wire [64:0] a_set_wo_ready; // @[Monitor.scala:627:34] wire [259:0] a_opcodes_set; // @[Monitor.scala:630:33] wire [259:0] a_sizes_set; // @[Monitor.scala:632:31] wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35] wire [9:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69] wire [9:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69] assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69] wire [9:0] _a_size_lookup_T; // @[Monitor.scala:641:65] assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65] wire [9:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101] assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101] wire [9:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99] assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99] wire [9:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69] assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69] wire [9:0] _c_size_lookup_T; // @[Monitor.scala:750:67] assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67] wire [9:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101] assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101] wire [9:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99] assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99] wire [259:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}] wire [259:0] _a_opcode_lookup_T_6 = {256'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}] wire [259:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:637:{97,152}] assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}] wire [3:0] a_size_lookup; // @[Monitor.scala:639:33] wire [259:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}] wire [259:0] _a_size_lookup_T_6 = {256'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}] wire [259:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[259:1]}; // @[Monitor.scala:641:{91,144}] assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}] wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40] wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38] wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44] wire 
[127:0] _GEN_2 = 128'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35] wire [127:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35] assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35] wire [127:0] _a_set_T; // @[OneHot.scala:58:35] assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35] assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35] wire _T_1027 = _T_1094 & a_first_1; // @[Decoupled.scala:51:35] assign a_set = _T_1027 ? _a_set_T[64:0] : 65'h0; // @[OneHot.scala:58:35] wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53] wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}] assign a_opcodes_set_interm = _T_1027 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}] wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51] wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}] assign a_sizes_set_interm = _T_1027 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}] wire [9:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79] wire [9:0] _a_opcodes_set_T; // @[Monitor.scala:659:79] assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79] wire [9:0] _a_sizes_set_T; // @[Monitor.scala:660:77] assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77] wire [1026:0] _a_opcodes_set_T_1 = {1023'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}] assign a_opcodes_set = _T_1027 ? _a_opcodes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}] wire [1026:0] _a_sizes_set_T_1 = {1023'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}] assign a_sizes_set = _T_1027 ? _a_sizes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}] wire [64:0] d_clr; // @[Monitor.scala:664:34] wire [64:0] d_clr_wo_ready; // @[Monitor.scala:665:34] wire [259:0] d_opcodes_clr; // @[Monitor.scala:668:33] wire [259:0] d_sizes_clr; // @[Monitor.scala:670:31] wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46] wire d_release_ack; // @[Monitor.scala:673:46] assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46] wire d_release_ack_1; // @[Monitor.scala:783:46] assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46] wire _T_1073 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26] wire [127:0] _GEN_5 = 128'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35] wire [127:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35] wire [127:0] _d_clr_T; // @[OneHot.scala:58:35] assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35] wire [127:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35] wire [127:0] _d_clr_T_1; // @[OneHot.scala:58:35] assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35] assign d_clr_wo_ready = _T_1073 & ~d_release_ack ? _d_clr_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35] wire _T_1042 = _T_1162 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35] assign d_clr = _T_1042 ? _d_clr_T[64:0] : 65'h0; // @[OneHot.scala:58:35] wire [1038:0] _d_opcodes_clr_T_5 = 1039'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}] assign d_opcodes_clr = _T_1042 ? 
_d_opcodes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}] wire [1038:0] _d_sizes_clr_T_5 = 1039'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}] assign d_sizes_clr = _T_1042 ? _d_sizes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}] wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}] wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113] wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}] wire [64:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27] wire [64:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38] wire [64:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}] wire [259:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43] wire [259:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62] wire [259:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}] wire [259:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39] wire [259:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56] wire [259:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}] reg [31:0] watchdog; // @[Monitor.scala:709:27] wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26] wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26] reg [64:0] inflight_1; // @[Monitor.scala:726:35] wire [64:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35] reg [259:0] inflight_opcodes_1; // @[Monitor.scala:727:35] wire [259:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43] reg [259:0] inflight_sizes_1; // @[Monitor.scala:728:35] wire [259:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41] wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46] wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter_2; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28] wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}] wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T_2 = d_first_2 ? 
d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35] wire [3:0] c_size_lookup; // @[Monitor.scala:748:35] wire [259:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}] wire [259:0] _c_opcode_lookup_T_6 = {256'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}] wire [259:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:749:{97,152}] assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}] wire [259:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}] wire [259:0] _c_size_lookup_T_6 = {256'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}] wire [259:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[259:1]}; // @[Monitor.scala:750:{93,146}] assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}] wire [64:0] d_clr_1; // @[Monitor.scala:774:34] wire [64:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34] wire [259:0] d_opcodes_clr_1; // @[Monitor.scala:776:34] wire [259:0] d_sizes_clr_1; // @[Monitor.scala:777:34] wire _T_1138 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26] assign d_clr_wo_ready_1 = _T_1138 & d_release_ack_1 ? _d_clr_wo_ready_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35] wire _T_1120 = _T_1162 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35] assign d_clr_1 = _T_1120 ? _d_clr_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35] wire [1038:0] _d_opcodes_clr_T_11 = 1039'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}] assign d_opcodes_clr_1 = _T_1120 ? _d_opcodes_clr_T_11[259:0] : 260'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}] wire [1038:0] _d_sizes_clr_T_11 = 1039'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}] assign d_sizes_clr_1 = _T_1120 ? _d_sizes_clr_T_11[259:0] : 260'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}] wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 7'h0; // @[Monitor.scala:36:7, :795:113] wire [64:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46] wire [64:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}] wire [259:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62] wire [259:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}] wire [259:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58] wire [259:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
Generate the Verilog code corresponding to the following Chisel files. File Transposer.scala: package gemmini import chisel3._ import chisel3.util._ import Util._ trait Transposer[T <: Data] extends Module { def dim: Int def dataType: T val io = IO(new Bundle { val inRow = Flipped(Decoupled(Vec(dim, dataType))) val outCol = Decoupled(Vec(dim, dataType)) }) } class PipelinedTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] { require(isPow2(dim)) val regArray = Seq.fill(dim, dim)(Reg(dataType)) val regArrayT = regArray.transpose val sMoveUp :: sMoveLeft :: Nil = Enum(2) val state = RegInit(sMoveUp) val leftCounter = RegInit(0.U(log2Ceil(dim+1).W)) //(io.inRow.fire && state === sMoveLeft, dim+1) val upCounter = RegInit(0.U(log2Ceil(dim+1).W)) //Counter(io.inRow.fire && state === sMoveUp, dim+1) io.outCol.valid := 0.U io.inRow.ready := 0.U switch(state) { is(sMoveUp) { io.inRow.ready := upCounter <= dim.U io.outCol.valid := leftCounter > 0.U when(io.inRow.fire) { upCounter := upCounter + 1.U } when(upCounter === (dim-1).U) { state := sMoveLeft leftCounter := 0.U } when(io.outCol.fire) { leftCounter := leftCounter - 1.U } } is(sMoveLeft) { io.inRow.ready := leftCounter <= dim.U // TODO: this is naive io.outCol.valid := upCounter > 0.U when(leftCounter === (dim-1).U) { state := sMoveUp } when(io.inRow.fire) { leftCounter := leftCounter + 1.U upCounter := 0.U } when(io.outCol.fire) { upCounter := upCounter - 1.U } } } // Propagate input from bottom row to top row systolically in the move up phase // TODO: need to iterate over columns to connect Chisel values of type T // Should be able to operate directly on the Vec, but Seq and Vec don't mix (try Array?) for (colIdx <- 0 until dim) { regArray.foldRight(io.inRow.bits(colIdx)) { case (regRow, prevReg) => when (state === sMoveUp) { regRow(colIdx) := prevReg } regRow(colIdx) } } // Propagate input from right side to left side systolically in the move left phase for (rowIdx <- 0 until dim) { regArrayT.foldRight(io.inRow.bits(rowIdx)) { case (regCol, prevReg) => when (state === sMoveLeft) { regCol(rowIdx) := prevReg } regCol(rowIdx) } } // Pull from the left side or the top side based on the state for (idx <- 0 until dim) { when (state === sMoveUp) { io.outCol.bits(idx) := regArray(0)(idx) }.elsewhen(state === sMoveLeft) { io.outCol.bits(idx) := regArrayT(0)(idx) }.otherwise { io.outCol.bits(idx) := DontCare } } } class AlwaysOutTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] { require(isPow2(dim)) val LEFT_DIR = 0.U(1.W) val UP_DIR = 1.U(1.W) class PE extends Module { val io = IO(new Bundle { val inR = Input(dataType) val inD = Input(dataType) val outL = Output(dataType) val outU = Output(dataType) val dir = Input(UInt(1.W)) val en = Input(Bool()) }) val reg = RegEnable(Mux(io.dir === LEFT_DIR, io.inR, io.inD), io.en) io.outU := reg io.outL := reg } val pes = Seq.fill(dim,dim)(Module(new PE)) val counter = RegInit(0.U((log2Ceil(dim) max 1).W)) // TODO replace this with a standard Chisel counter val dir = RegInit(LEFT_DIR) // Wire up horizontal signals for (row <- 0 until dim; col <- 0 until dim) { val right_in = if (col == dim-1) io.inRow.bits(row) else pes(row)(col+1).io.outL pes(row)(col).io.inR := right_in } // Wire up vertical signals for (row <- 0 until dim; col <- 0 until dim) { val down_in = if (row == dim-1) io.inRow.bits(col) else pes(row+1)(col).io.outU pes(row)(col).io.inD := down_in } // Wire up global signals pes.flatten.foreach(_.io.dir := dir) pes.flatten.foreach(_.io.en := 
io.inRow.fire) io.outCol.valid := true.B io.inRow.ready := true.B val left_out = VecInit(pes.transpose.head.map(_.io.outL)) val up_out = VecInit(pes.head.map(_.io.outU)) io.outCol.bits := Mux(dir === LEFT_DIR, left_out, up_out) when (io.inRow.fire) { counter := wrappingAdd(counter, 1.U, dim) } when (counter === (dim-1).U && io.inRow.fire) { dir := ~dir } } class NaiveTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] { val regArray = Seq.fill(dim, dim)(Reg(dataType)) val regArrayT = regArray.transpose // state = 0 => filling regArray row-wise, state = 1 => draining regArray column-wise val state = RegInit(0.U(1.W)) val countInc = io.inRow.fire || io.outCol.fire val (countValue, countWrap) = Counter(countInc, dim) io.inRow.ready := state === 0.U io.outCol.valid := state === 1.U for (i <- 0 until dim) { for (j <- 0 until dim) { when(countValue === i.U && io.inRow.fire) { regArray(i)(j) := io.inRow.bits(j) } } } for (i <- 0 until dim) { io.outCol.bits(i) := 0.U for (j <- 0 until dim) { when(countValue === j.U) { io.outCol.bits(i) := regArrayT(j)(i) } } } when (io.inRow.fire && countWrap) { state := 1.U } when (io.outCol.fire && countWrap) { state := 0.U } assert(!(state === 0.U) || !io.outCol.fire) assert(!(state === 1.U) || !io.inRow.fire) }
module PE_51( // @[Transposer.scala:100:9]
  input        clock, // @[Transposer.scala:100:9]
  input        reset, // @[Transposer.scala:100:9]
  input  [7:0] io_inR, // @[Transposer.scala:101:16]
  input  [7:0] io_inD, // @[Transposer.scala:101:16]
  output [7:0] io_outL, // @[Transposer.scala:101:16]
  output [7:0] io_outU, // @[Transposer.scala:101:16]
  input        io_dir, // @[Transposer.scala:101:16]
  input        io_en // @[Transposer.scala:101:16]
);

  wire [7:0] io_inR_0 = io_inR; // @[Transposer.scala:100:9]
  wire [7:0] io_inD_0 = io_inD; // @[Transposer.scala:100:9]
  wire       io_dir_0 = io_dir; // @[Transposer.scala:100:9]
  wire       io_en_0 = io_en; // @[Transposer.scala:100:9]
  wire [7:0] io_outL_0; // @[Transposer.scala:100:9]
  wire [7:0] io_outU_0; // @[Transposer.scala:100:9]
  wire       _reg_T = ~io_dir_0; // @[Transposer.scala:100:9, :110:36]
  wire [7:0] _reg_T_1 = _reg_T ? io_inR_0 : io_inD_0; // @[Transposer.scala:100:9, :110:{28,36}]
  reg  [7:0] reg_0; // @[Transposer.scala:110:24]
  assign io_outL_0 = reg_0; // @[Transposer.scala:100:9, :110:24]
  assign io_outU_0 = reg_0; // @[Transposer.scala:100:9, :110:24]
  always @(posedge clock) begin // @[Transposer.scala:100:9]
    if (io_en_0) // @[Transposer.scala:100:9]
      reg_0 <= _reg_T_1; // @[Transposer.scala:110:{24,28}]
  end // always @(posedge)
  assign io_outL = io_outL_0; // @[Transposer.scala:100:9]
  assign io_outU = io_outU_0; // @[Transposer.scala:100:9]
endmodule
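For context on how a leaf module like PE_51 is produced from the Transposer.scala sources above, the following is a minimal, hypothetical elaboration driver; the object name, the dim = 4 example size, and the use of ChiselStage.emitVerilog (Chisel 3.5-style) are assumptions and not part of the original files. Each PE of an AlwaysOutTransposer over 8-bit elements elaborates to an 8-bit enable-gated register with the dir-selected mux seen in the Verilog above.

// Hypothetical driver (not in the original sources): elaborates an AlwaysOutTransposer
// whose PEs match the 8-bit PE module shown above. Assumes Chisel 3.5+ and the gemmini
// package on the classpath.
import chisel3._
import chisel3.stage.ChiselStage
import gemmini.AlwaysOutTransposer

object EmitTransposer extends App {
  // dim = 4 is an assumed example size; each of the 16 PEs holds one 8-bit element.
  (new ChiselStage).emitVerilog(new AlwaysOutTransposer(4, UInt(8.W)))
}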
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: package constellation.channel import chisel3._ import chisel3.util._ import freechips.rocketchip.diplomacy._ import org.chipsalliance.cde.config.{Parameters} import freechips.rocketchip.util._ import constellation.noc.{HasNoCParams} class NoCMonitor(val cParam: ChannelParams)(implicit val p: Parameters) extends Module with HasNoCParams { val io = IO(new Bundle { val in = Input(new Channel(cParam)) }) val in_flight = RegInit(VecInit(Seq.fill(cParam.nVirtualChannels) { false.B })) for (i <- 0 until cParam.srcSpeedup) { val flit = io.in.flit(i) when (flit.valid) { when (flit.bits.head) { in_flight(flit.bits.virt_channel_id) := true.B assert (!in_flight(flit.bits.virt_channel_id), "Flit head/tail sequencing is broken") } when (flit.bits.tail) { in_flight(flit.bits.virt_channel_id) := false.B } } val possibleFlows = cParam.possibleFlows when (flit.valid && flit.bits.head) { cParam match { case n: ChannelParams => n.virtualChannelParams.zipWithIndex.foreach { case (v,i) => assert(flit.bits.virt_channel_id =/= i.U || v.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR) } case _ => assert(cParam.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR) } } } } File Types.scala: package constellation.routing import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.{Parameters} import constellation.noc.{HasNoCParams} import constellation.channel.{Flit} /** A representation for 1 specific virtual channel in wormhole routing * * @param src the source node * @param vc ID for the virtual channel * @param dst the destination node * @param n_vc the number of virtual channels */ // BEGIN: ChannelRoutingInfo case class ChannelRoutingInfo( src: Int, dst: Int, vc: Int, n_vc: Int ) { // END: ChannelRoutingInfo require (src >= -1 && dst >= -1 && vc >= 0, s"Illegal $this") require (!(src == -1 && dst == -1), s"Illegal $this") require (vc < n_vc, s"Illegal $this") val isIngress = src == -1 val isEgress = dst == -1 } /** Represents the properties of a packet that are relevant for routing * ingressId and egressId uniquely identify a flow, but vnet and dst are used here * to simplify the implementation of routingrelations * * @param ingressId packet's source ingress point * @param egressId packet's destination egress point * @param vNet virtual subnetwork identifier * @param dst packet's destination node ID */ // BEGIN: FlowRoutingInfo case class FlowRoutingInfo( ingressId: Int, egressId: Int, vNetId: Int, ingressNode: Int, ingressNodeId: Int, egressNode: Int, egressNodeId: Int, fifo: Boolean ) { // END: FlowRoutingInfo def isFlow(f: FlowRoutingBundle): Bool = { (f.ingress_node === ingressNode.U && f.egress_node === egressNode.U && f.ingress_node_id === ingressNodeId.U && f.egress_node_id === egressNodeId.U) } def asLiteral(b: FlowRoutingBundle): BigInt = { Seq( (vNetId , b.vnet_id), (ingressNode , b.ingress_node), (ingressNodeId , b.ingress_node_id), (egressNode , b.egress_node), (egressNodeId , b.egress_node_id) ).foldLeft(0)((l, t) => { (l << t._2.getWidth) | t._1 }) } } class FlowRoutingBundle(implicit val p: Parameters) extends Bundle with HasNoCParams { // Instead of tracking ingress/egress ID, track the physical destination id and the offset at the destination // This simplifies the routing tables val vnet_id = UInt(log2Ceil(nVirtualNetworks).W) val ingress_node = UInt(log2Ceil(nNodes).W) val ingress_node_id = UInt(log2Ceil(maxIngressesAtNode).W) val egress_node = UInt(log2Ceil(nNodes).W) val 
egress_node_id = UInt(log2Ceil(maxEgressesAtNode).W) }
module NoCMonitor_115( // @[Monitor.scala:11:7]
  input       clock, // @[Monitor.scala:11:7]
  input       reset, // @[Monitor.scala:11:7]
  input       io_in_flit_0_valid, // @[Monitor.scala:12:14]
  input       io_in_flit_0_bits_head, // @[Monitor.scala:12:14]
  input       io_in_flit_0_bits_tail, // @[Monitor.scala:12:14]
  input [3:0] io_in_flit_0_bits_flow_ingress_node, // @[Monitor.scala:12:14]
  input [1:0] io_in_flit_0_bits_flow_ingress_node_id, // @[Monitor.scala:12:14]
  input [3:0] io_in_flit_0_bits_flow_egress_node, // @[Monitor.scala:12:14]
  input [2:0] io_in_flit_0_bits_flow_egress_node_id, // @[Monitor.scala:12:14]
  input [3:0] io_in_flit_0_bits_virt_channel_id // @[Monitor.scala:12:14]
);

  reg  in_flight_0; // @[Monitor.scala:16:26]
  reg  in_flight_1; // @[Monitor.scala:16:26]
  reg  in_flight_2; // @[Monitor.scala:16:26]
  reg  in_flight_3; // @[Monitor.scala:16:26]
  reg  in_flight_4; // @[Monitor.scala:16:26]
  reg  in_flight_5; // @[Monitor.scala:16:26]
  reg  in_flight_6; // @[Monitor.scala:16:26]
  reg  in_flight_7; // @[Monitor.scala:16:26]
  reg  in_flight_8; // @[Monitor.scala:16:26]
  reg  in_flight_9; // @[Monitor.scala:16:26]
  wire _GEN = io_in_flit_0_bits_virt_channel_id == 4'h0; // @[Monitor.scala:21:46]
  wire _GEN_0 = io_in_flit_0_bits_virt_channel_id == 4'h1; // @[Monitor.scala:21:46]
  wire _GEN_1 = io_in_flit_0_bits_virt_channel_id == 4'h2; // @[Monitor.scala:21:46]
  wire _GEN_2 = io_in_flit_0_bits_virt_channel_id == 4'h3; // @[Monitor.scala:21:46]
  wire _GEN_3 = io_in_flit_0_bits_virt_channel_id == 4'h4; // @[Monitor.scala:21:46]
  wire _GEN_4 = io_in_flit_0_bits_virt_channel_id == 4'h5; // @[Monitor.scala:21:46]
  wire _GEN_5 = io_in_flit_0_bits_virt_channel_id == 4'h6; // @[Monitor.scala:21:46]
  wire _GEN_6 = io_in_flit_0_bits_virt_channel_id == 4'h7; // @[Monitor.scala:21:46]
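The FlowRoutingInfo.asLiteral method in the Types.scala listing above packs the flow fields by repeatedly shifting the accumulator left by each field's width and OR-ing the field value in. The snippet below is a small, plain-Scala illustration of that packing pattern (the object name and the example field widths are assumptions, not from the original sources).

// Illustrative only: the same shift-and-or fold used by FlowRoutingInfo.asLiteral.
object PackDemo extends App {
  // fields are (value, widthInBits), packed most-significant-first.
  def pack(fields: Seq[(Int, Int)]): BigInt =
    fields.foldLeft(BigInt(0)) { case (acc, (value, width)) => (acc << width) | value }
  // e.g. vnet = 1 (2 bits), ingressNode = 3 (4 bits), egressNode = 5 (4 bits)
  val packed = pack(Seq((1, 2), (3, 4), (5, 4)))
  println(packed.toString(2)) // prints 100110101
}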
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File AsyncQueue.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ case class AsyncQueueParams( depth: Int = 8, sync: Int = 3, safe: Boolean = true, // If safe is true, then effort is made to resynchronize the crossing indices when either side is reset. // This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty. narrow: Boolean = false) // If narrow is true then the read mux is moved to the source side of the crossing. // This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing, // at the expense of a combinational path from the sink to the source and back to the sink. { require (depth > 0 && isPow2(depth)) require (sync >= 2) val bits = log2Ceil(depth) val wires = if (narrow) 1 else depth } object AsyncQueueParams { // When there is only one entry, we don't need narrow. 
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false) } class AsyncBundleSafety extends Bundle { val ridx_valid = Input (Bool()) val widx_valid = Output(Bool()) val source_reset_n = Output(Bool()) val sink_reset_n = Input (Bool()) } class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle { // Data-path synchronization val mem = Output(Vec(params.wires, gen)) val ridx = Input (UInt((params.bits+1).W)) val widx = Output(UInt((params.bits+1).W)) val index = params.narrow.option(Input(UInt(params.bits.W))) // Signals used to self-stabilize a safe AsyncQueue val safe = params.safe.option(new AsyncBundleSafety) } object GrayCounter { def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = { val incremented = Wire(UInt(bits.W)) val binary = RegNext(next=incremented, init=0.U).suggestName(name) incremented := Mux(clear, 0.U, binary + increment.asUInt) incremented ^ (incremented >> 1) } } class AsyncValidSync(sync: Int, desc: String) extends RawModule { val io = IO(new Bundle { val in = Input(Bool()) val out = Output(Bool()) }) val clock = IO(Input(Clock())) val reset = IO(Input(AsyncReset())) withClockAndReset(clock, reset){ io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc)) } } class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSource_${gen.typeName}" val io = IO(new Bundle { // These come from the source domain val enq = Flipped(Decoupled(gen)) // These cross to the sink clock domain val async = new AsyncBundle(gen, params) }) val bits = params.bits val sink_ready = WireInit(true.B) val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all. 
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin")) val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray")) val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U) val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1)) when (io.enq.fire) { mem(index) := io.enq.bits } val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg")) io.enq.ready := ready_reg && sink_ready val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray")) io.async.widx := widx_reg io.async.index match { case Some(index) => io.async.mem(0) := mem(index) case None => io.async.mem := mem } io.async.safe.foreach { sio => val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0")) val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1")) val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend")) val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid")) source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_valid .reset := reset.asAsyncReset source_valid_0.clock := clock source_valid_1.clock := clock sink_extend .clock := clock sink_valid .clock := clock source_valid_0.io.in := true.B source_valid_1.io.in := source_valid_0.io.out sio.widx_valid := source_valid_1.io.out sink_extend.io.in := sio.ridx_valid sink_valid.io.in := sink_extend.io.out sink_ready := sink_valid.io.out sio.source_reset_n := !reset.asBool // Assert that if there is stuff in the queue, then reset cannot happen // Impossible to write because dequeue can occur on the receiving side, // then reset allowed to happen, but write side cannot know that dequeue // occurred. 
// TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected") // assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty") } } class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSink_${gen.typeName}" val io = IO(new Bundle { // These come from the sink domain val deq = Decoupled(gen) // These cross to the source clock domain val async = Flipped(new AsyncBundle(gen, params)) }) val bits = params.bits val source_ready = WireInit(true.B) val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin")) val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray")) val valid = source_ready && ridx =/= widx // The mux is safe because timing analysis ensures ridx has reached the register // On an ASIC, changes to the unread location cannot affect the selected value // On an FPGA, only one input changes at a time => mem updates don't cause glitches // The register only latches when the selected valued is not being written val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1)) io.async.index.foreach { _ := index } // This register does not NEED to be reset, as its contents will not // be considered unless the asynchronously reset deq valid register is set. // It is possible that bits latches when the source domain is reset / has power cut // This is safe, because isolation gates brought mem low before the zeroed widx reached us val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index) io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg")) val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg")) io.deq.valid := valid_reg && source_ready val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray")) io.async.ridx := ridx_reg io.async.safe.foreach { sio => val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0")) val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1")) val source_extend = Module(new AsyncValidSync(params.sync, "source_extend")) val source_valid = Module(new AsyncValidSync(params.sync, "source_valid")) sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_valid .reset := reset.asAsyncReset sink_valid_0 .clock := clock sink_valid_1 .clock := clock source_extend.clock := clock source_valid .clock := clock sink_valid_0.io.in := true.B sink_valid_1.io.in := sink_valid_0.io.out sio.ridx_valid := sink_valid_1.io.out source_extend.io.in := sio.widx_valid source_valid.io.in := source_extend.io.out source_ready := source_valid.io.out sio.sink_reset_n := !reset.asBool // TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // // val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool // val reset_and_extend_prev = RegNext(reset_and_extend, true.B) // val reset_rise = !reset_and_extend_prev && reset_and_extend // val prev_idx_match = 
AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0) // assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty") } } object FromAsyncBundle { // Sometimes it makes sense for the sink to have different sync than the source def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync) def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = { val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync))) sink.io.async <> x sink.io.deq } } object ToAsyncBundle { def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = { val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params)) source.io.enq <> x source.io.async } } class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] { val io = IO(new CrossingIO(gen)) val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) } val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) } source.io.enq <> io.enq io.deq <> sink.io.deq sink.io.async <> source.io.async }
module AsyncValidSync_14( // @[AsyncQueue.scala:58:7]
  input  io_in, // @[AsyncQueue.scala:59:14]
  output io_out, // @[AsyncQueue.scala:59:14]
  input  clock, // @[AsyncQueue.scala:63:17]
  input  reset // @[AsyncQueue.scala:64:17]
);

  wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7]
  wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
  wire io_out_0; // @[AsyncQueue.scala:58:7]
  assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
  AsyncResetSynchronizerShiftReg_w1_d3_i0_14 io_out_sink_extend ( // @[ShiftReg.scala:45:23]
    .clock (clock),
    .reset (reset),
    .io_d  (io_in_0), // @[AsyncQueue.scala:58:7]
    .io_q  (_io_out_WIRE)
  ); // @[ShiftReg.scala:45:23]
  assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule
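The GrayCounter helper in the AsyncQueue.scala listing above is what makes the widx/ridx pointers safe to pass through synchronizer chains such as the one AsyncValidSync_14 instantiates: gray = binary ^ (binary >> 1), so consecutive counter values differ in exactly one bit and a synchronizer can only ever observe the old or the new pointer, never a torn mix. The snippet below is a plain-Scala sketch of that property (illustrative only, not part of the original sources).

// Plain-Scala sketch of the Gray coding used by GrayCounter: gray = binary ^ (binary >> 1).
object GrayDemo extends App {
  def toGray(n: Int): Int = n ^ (n >> 1)
  val codes = (0 until 16).map(toGray)
  // Adjacent codes, including the 15 -> 0 wrap of a 4-bit counter, differ in exactly one bit.
  (codes :+ codes.head).sliding(2).foreach { case Seq(a, b) =>
    assert(Integer.bitCount(a ^ b) == 1)
  }
  codes.foreach(c => println(String.format("%4s", c.toBinaryString).replace(' ', '0')))
}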